Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-31 09:10:38 +00:00)

Compare commits: projects-m ... persistent (615 commits)
.cargo/config.toml (new file, 16 lines)
@@ -0,0 +1,16 @@
# The binaries are really slow, if you compile them in 'dev' mode with the defaults.
# Enable some optimizations even in 'dev' mode, to make tests faster. The basic
# optimizations enabled by "opt-level=1" don't affect debuggability too much.
#
# See https://www.reddit.com/r/rust/comments/gvrgca/this_is_a_neat_trick_for_getting_good_runtime/
#
[profile.dev.package."*"]
# Set the default for dependencies in Development mode.
opt-level = 3

[profile.dev]
# Turn on a small amount of optimization in Development mode.
opt-level = 1

[alias]
build_testing = ["build", "--features", "testing"]
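The [alias] table exposes the testing build as a cargo subcommand. A minimal usage sketch (assuming a checkout of this repo, whose workspace defines the `testing` feature):

    # dev profile: dependencies at opt-level 3, workspace code at opt-level 1
    cargo build
    # expands to `cargo build --features testing` via the [alias] table above
    cargo build_testing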
.circleci/ansible/.gitignore (vendored, 4 lines deleted)
@@ -1,4 +0,0 @@
zenith_install.tar.gz
.zenith_current_version
neon_install.tar.gz
.neon_current_version
@@ -1,52 +0,0 @@
#!/bin/bash

set -e

RELEASE=${RELEASE:-false}

# look at docker hub for latest tag for neon docker image
if [ "${RELEASE}" = "true" ]; then
  echo "search latest release tag"
  VERSION=$(curl -s https://registry.hub.docker.com/v1/repositories/neondatabase/neon/tags |jq -r -S '.[].name' | grep release | sed 's/release-//g' | grep -E '^[0-9]+$' | sort -n | tail -1)
  if [ -z "${VERSION}" ]; then
    echo "no docker tags found, exiting..."
    exit 1
  else
    TAG="release-${VERSION}"
  fi
else
  echo "search latest dev tag"
  VERSION=$(curl -s https://registry.hub.docker.com/v1/repositories/neondatabase/neon/tags |jq -r -S '.[].name' | grep -E '^[0-9]+$' | sort -n | tail -1)
  if [ -z "${VERSION}" ]; then
    echo "no docker tags found, exiting..."
    exit 1
  else
    TAG="${VERSION}"
  fi
fi

echo "found ${VERSION}"

# do initial cleanup
rm -rf neon_install postgres_install.tar.gz neon_install.tar.gz .neon_current_version
mkdir neon_install

# retrieve binaries from docker image
echo "getting binaries from docker image"
docker pull --quiet neondatabase/neon:${TAG}
ID=$(docker create neondatabase/neon:${TAG})
docker cp ${ID}:/data/postgres_install.tar.gz .
tar -xzf postgres_install.tar.gz -C neon_install
docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
docker cp ${ID}:/usr/local/bin/postgres neon_install/bin/
docker rm -vf ${ID}

# store version to file (for ansible playbooks) and create binaries tarball
echo ${VERSION} > neon_install/.neon_current_version
echo ${VERSION} > .neon_current_version
tar -czf neon_install.tar.gz -C neon_install .

# do final cleanup
rm -rf neon_install postgres_install.tar.gz
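A usage sketch for the script above (assuming it is the get_binaries.sh helper that the deploy jobs later in this config invoke from .circleci/ansible):

    # package the newest dev image from Docker Hub into neon_install.tar.gz
    ./get_binaries.sh
    # package the newest release-<N> image instead
    RELEASE=true ./get_binaries.sh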
@@ -1,20 +0,0 @@
[pageservers]
neon-stress-ps-1 console_region_id=1
neon-stress-ps-2 console_region_id=1

[safekeepers]
neon-stress-sk-1 console_region_id=1
neon-stress-sk-2 console_region_id=1
neon-stress-sk-3 console_region_id=1

[storage:children]
pageservers
safekeepers

[storage:vars]
env_name = neon-stress
console_mgmt_base_url = http://neon-stress-console.local
bucket_name = neon-storage-ireland
bucket_region = eu-west-1
etcd_endpoints = etcd-stress.local:2379
safekeeper_enable_s3_offload = false
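The deploy jobs later in this config consume this inventory as `-i neon-stress.hosts`. A quick sanity check of the group layout (a sketch, assuming Ansible is installed locally):

    # print the pageservers / safekeepers / storage group tree
    ansible-inventory -i neon-stress.hosts --graph
    # show the variables a single host inherits from [storage:vars]
    ansible-inventory -i neon-stress.hosts --host neon-stress-sk-1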
@@ -1,19 +0,0 @@
[pageservers]
#zenith-1-ps-1 console_region_id=1
zenith-1-ps-2 console_region_id=1

[safekeepers]
zenith-1-sk-1 console_region_id=1
zenith-1-sk-2 console_region_id=1
zenith-1-sk-3 console_region_id=1

[storage:children]
pageservers
safekeepers

[storage:vars]
env_name = prod-1
console_mgmt_base_url = http://console-release.local
bucket_name = zenith-storage-oregon
bucket_region = us-west-2
etcd_endpoints = etcd-release.local:2379
@@ -1,30 +0,0 @@
#!/bin/sh

# get instance id from meta-data service
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)

# store fqdn hostname in var
HOST=$(hostname -f)


cat <<EOF | tee /tmp/payload
{
  "version": 1,
  "host": "${HOST}",
  "port": 6500,
  "region_id": {{ console_region_id }},
  "instance_id": "${INSTANCE_ID}",
  "http_host": "${HOST}",
  "http_port": 7676
}
EOF

# check if safekeeper already registered or not
if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/safekeepers/${INSTANCE_ID} -o /dev/null; then

  # not registered, so register it now
  ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/safekeepers -d@/tmp/payload | jq -r '.ID')

  # init safekeeper
  sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
fi
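`{{ console_region_id }}` and `{{ console_mgmt_base_url }}` are Jinja2 placeholders that Ansible fills in from the inventory's host and [storage:vars] values when it templates this script onto a host. A sketch of the rendered registration call for the neon-stress environment (value taken from the neon-stress inventory above):

    # console_mgmt_base_url = http://neon-stress-console.local in neon-stress.hosts
    ID=$(curl -sf -X POST http://neon-stress-console.local/api/v1/safekeepers -d@/tmp/payload | jq -r '.ID')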
@@ -1,20 +0,0 @@
[pageservers]
#zenith-us-stage-ps-1 console_region_id=27
zenith-us-stage-ps-2 console_region_id=27
zenith-us-stage-ps-3 console_region_id=27

[safekeepers]
zenith-us-stage-sk-4 console_region_id=27
zenith-us-stage-sk-5 console_region_id=27
zenith-us-stage-sk-6 console_region_id=27

[storage:children]
pageservers
safekeepers

[storage:vars]
env_name = us-stage
console_mgmt_base_url = http://console-staging.local
bucket_name = zenith-staging-storage-us-east-1
bucket_region = us-east-1
etcd_endpoints = etcd-staging.local:2379
@@ -1,18 +0,0 @@
[Unit]
Description=Zenith safekeeper
After=network.target auditd.service

[Service]
Type=simple
User=safekeeper
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/lib
ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ env_name }}/wal"}'
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT
Restart=on-failure
TimeoutSec=10
LimitNOFILE=30000000

[Install]
WantedBy=multi-user.target
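Once Ansible renders this template onto a safekeeper host, the unit is managed like any other systemd service. A sketch (the unit file name `safekeeper.service` is an assumption; it is not shown in this diff):

    sudo systemctl daemon-reload
    sudo systemctl enable --now safekeeper.service
    # follow the safekeeper logs
    journalctl -u safekeeper.service -f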
@@ -1,742 +0,0 @@
version: 2.1

executors:
  neon-xlarge-executor:
    resource_class: xlarge
    docker:
      # NB: when changed, do not forget to update rust image tag in all Dockerfiles
      - image: zimg/rust:1.58
  neon-executor:
    docker:
      - image: zimg/rust:1.58

jobs:
  # A job to build postgres
  build-postgres:
    executor: neon-xlarge-executor
    parameters:
      build_type:
        type: enum
        enum: ["debug", "release"]
    environment:
      BUILD_TYPE: << parameters.build_type >>
    steps:
      # Checkout the git repo (circleci doesn't have a flag to enable submodules here)
      - checkout

      # Grab the postgres git revision to build a cache key.
      # Append the Makefile as it could change the way postgres is built.
      # Note this works even though the submodule hasn't been checked out yet.
      - run:
          name: Get postgres cache key
          command: |
            git rev-parse HEAD:vendor/postgres > /tmp/cache-key-postgres
            cat Makefile >> /tmp/cache-key-postgres

      - restore_cache:
          name: Restore postgres cache
          keys:
            # Restore ONLY if the rev key matches exactly
            - v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}

      # Build postgres if the restore_cache didn't find a build.
      # `make` can't figure out whether the cache is valid, since
      # it only compares file timestamps.
      - run:
          name: build postgres
          command: |
            if [ ! -e tmp_install/bin/postgres ]; then
              # "depth 1" saves some time by not cloning the whole repo
              git submodule update --init --depth 1
              # bail out on any warnings
              COPT='-Werror' mold -run make postgres -j$(nproc)
            fi

      - save_cache:
          name: Save postgres cache
          key: v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}
          paths:
            - tmp_install

  # A job to build Neon rust code
  build-neon:
    executor: neon-xlarge-executor
    parameters:
      build_type:
        type: enum
        enum: ["debug", "release"]
    environment:
      BUILD_TYPE: << parameters.build_type >>
    steps:
      # Checkout the git repo (without submodules)
      - checkout

      # Grab the postgres git revision to build a cache key.
      # Append the Makefile as it could change the way postgres is built.
      # Note this works even though the submodule hasn't been checked out yet.
      - run:
          name: Get postgres cache key
          command: |
            git rev-parse HEAD:vendor/postgres > /tmp/cache-key-postgres
            cat Makefile >> /tmp/cache-key-postgres

      - restore_cache:
          name: Restore postgres cache
          keys:
            # Restore ONLY if the rev key matches exactly
            - v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}

      - restore_cache:
          name: Restore rust cache
          keys:
            # Require an exact match. While an out of date cache might speed up the build,
            # there's no way to clean out old packages, so the cache grows every time something
            # changes.
            - v04-rust-cache-deps-<< parameters.build_type >>-{{ checksum "Cargo.lock" }}

      # Build the rust code, including test binaries
      - run:
          name: Rust build << parameters.build_type >>
          command: |
            if [[ $BUILD_TYPE == "debug" ]]; then
              CARGO_FLAGS=
            elif [[ $BUILD_TYPE == "release" ]]; then
              CARGO_FLAGS="--release --features profiling"
            fi

            export CARGO_INCREMENTAL=0
            export CACHEPOT_BUCKET=zenith-rust-cachepot
            export RUSTC_WRAPPER=cachepot
            export AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}"
            export AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}"
            mold -run cargo build $CARGO_FLAGS --features failpoints --bins --tests
            cachepot -s

      - save_cache:
          name: Save rust cache
          key: v04-rust-cache-deps-<< parameters.build_type >>-{{ checksum "Cargo.lock" }}
          paths:
            - ~/.cargo/registry
            - ~/.cargo/git
            - target

      # Run rust unit tests
      - run:
          name: cargo test
          command: |
            if [[ $BUILD_TYPE == "debug" ]]; then
              CARGO_FLAGS=
            elif [[ $BUILD_TYPE == "release" ]]; then
              CARGO_FLAGS=--release
            fi

            cargo test $CARGO_FLAGS

      # Install the rust binaries, for use by test jobs
      - run:
          name: Install rust binaries
          command: |
            binaries=$(
              cargo metadata --format-version=1 --no-deps |
              jq -r '.packages[].targets[] | select(.kind | index("bin")) | .name'
            )

            test_exe_paths=$(
              cargo test --message-format=json --no-run |
              jq -r '.executable | select(. != null)'
            )

            mkdir -p /tmp/zenith/bin
            mkdir -p /tmp/zenith/test_bin
            mkdir -p /tmp/zenith/etc

            # Install target binaries
            for bin in $binaries; do
              SRC=target/$BUILD_TYPE/$bin
              DST=/tmp/zenith/bin/$bin
              cp $SRC $DST
            done

      # Install the postgres binaries, for use by test jobs
      - run:
          name: Install postgres binaries
          command: |
            cp -a tmp_install /tmp/zenith/pg_install

      # Save rust binaries for other jobs in the workflow
      - persist_to_workspace:
          root: /tmp/zenith
          paths:
            - "*"

  check-codestyle-python:
    executor: neon-executor
    steps:
      - checkout
      - restore_cache:
          keys:
            - v2-python-deps-{{ checksum "poetry.lock" }}
      - run:
          name: Install deps
          command: ./scripts/pysync
      - save_cache:
          key: v2-python-deps-{{ checksum "poetry.lock" }}
          paths:
            - /home/circleci/.cache/pypoetry/virtualenvs
      - run:
          name: Print versions
          when: always
          command: |
            poetry run python --version
            poetry show
      - run:
          name: Run yapf to ensure code format
          when: always
          command: poetry run yapf --recursive --diff .
      - run:
          name: Run mypy to check types
          when: always
          command: poetry run mypy .

  run-pytest:
    executor: neon-executor
    parameters:
      # Select the type of Rust build. Must be "release" or "debug".
      build_type:
        type: string
        default: "debug"
      # pytest args to specify the tests to run.
      #
      # This can be a test file name, e.g. 'test_pgbench.py', or a subdirectory,
      # or '-k foobar' to run tests containing string 'foobar'. See pytest man page
      # section SPECIFYING TESTS / SELECTING TESTS for details.
      #
      # This parameter is required, to prevent the mistake of running all tests in one job.
      test_selection:
        type: string
        default: ""
      # Arbitrary parameters to pytest. For example "-s" to prevent capturing stdout/stderr
      extra_params:
        type: string
        default: ""
      needs_postgres_source:
        type: boolean
        default: false
      run_in_parallel:
        type: boolean
        default: true
      save_perf_report:
        type: boolean
        default: false
    environment:
      BUILD_TYPE: << parameters.build_type >>
    steps:
      - attach_workspace:
          at: /tmp/zenith
      - checkout
      - when:
          condition: << parameters.needs_postgres_source >>
          steps:
            - run: git submodule update --init --depth 1
      - restore_cache:
          keys:
            - v2-python-deps-{{ checksum "poetry.lock" }}
      - run:
          name: Install deps
          command: ./scripts/pysync
      - save_cache:
          key: v2-python-deps-{{ checksum "poetry.lock" }}
          paths:
            - /home/circleci/.cache/pypoetry/virtualenvs
      - run:
          name: Run pytest
          # pytest doesn't output test logs in real time, so the CI job may fail with
          # a `Too long with no output` error if a test runs for a long time.
          # In that case, tests should have internal timeouts that are less than
          # no_output_timeout, specified here.
          no_output_timeout: 10m
          environment:
            - NEON_BIN: /tmp/zenith/bin
            - POSTGRES_DISTRIB_DIR: /tmp/zenith/pg_install
            - TEST_OUTPUT: /tmp/test_output
            # this variable will be embedded in the perf test report
            # and is needed to distinguish different environments
            - PLATFORM: zenith-local-ci
          command: |
            PERF_REPORT_DIR="$(realpath test_runner/perf-report-local)"
            rm -rf $PERF_REPORT_DIR

            TEST_SELECTION="test_runner/<< parameters.test_selection >>"
            EXTRA_PARAMS="<< parameters.extra_params >>"
            if [ -z "$TEST_SELECTION" ]; then
              echo "test_selection must be set"
              exit 1
            fi
            if << parameters.run_in_parallel >>; then
              EXTRA_PARAMS="-n4 $EXTRA_PARAMS"
            fi
            if << parameters.save_perf_report >>; then
              if [[ $CIRCLE_BRANCH == "main" ]]; then
                mkdir -p "$PERF_REPORT_DIR"
                EXTRA_PARAMS="--out-dir $PERF_REPORT_DIR $EXTRA_PARAMS"
              fi
            fi

            export GITHUB_SHA=$CIRCLE_SHA1

            # Run the tests.
            #
            # The junit.xml file allows CircleCI to display more fine-grained test information
            # in its "Tests" tab in the results page.
            # --verbose prints name of each test (helpful when there are
            # multiple tests in one file)
            # -rA prints summary in the end
            # -n4 uses four processes to run tests via pytest-xdist
            # -s is not used to prevent pytest from capturing output, because tests are running
            # in parallel and logs are mixed between different tests
            ./scripts/pytest \
              --junitxml=$TEST_OUTPUT/junit.xml \
              --tb=short \
              --verbose \
              -m "not remote_cluster" \
              -rA $TEST_SELECTION $EXTRA_PARAMS

            if << parameters.save_perf_report >>; then
              if [[ $CIRCLE_BRANCH == "main" ]]; then
                export REPORT_FROM="$PERF_REPORT_DIR"
                export REPORT_TO=local
                scripts/generate_and_push_perf_report.sh
              fi
            fi
      - run:
          # CircleCI artifacts are preserved one file at a time, so skipping
          # this step isn't a good idea. If you want to extract the
          # pageserver state, perhaps a tarball would be a better idea.
          name: Delete all data but logs
          when: always
          command: |
            du -sh /tmp/test_output/*
            find /tmp/test_output -type f ! -name "*.log" ! -name "regression.diffs" ! -name "junit.xml" ! -name "*.filediff" ! -name "*.stdout" ! -name "*.stderr" ! -name "flamegraph.svg" ! -name "*.metrics" -delete
            du -sh /tmp/test_output/*
      - store_artifacts:
          path: /tmp/test_output
      # The store_test_results step tells CircleCI where to find the junit.xml file.
      - store_test_results:
          path: /tmp/test_output
      # Save data (if any)
      - persist_to_workspace:
          root: /tmp/zenith
          paths:
            - "*"

  # Build neondatabase/neon:latest image and push it to Docker hub
  docker-image:
    docker:
      - image: cimg/base:2021.04
    steps:
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - run:
          name: Init postgres submodule
          command: git submodule update --init --depth 1
      - run:
          name: Build and push Docker image
          command: |
            echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
            DOCKER_TAG=$(git log --oneline|wc -l)
            docker build \
              --pull \
              --build-arg GIT_VERSION=${CIRCLE_SHA1} \
              --build-arg AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}" \
              --build-arg AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}" \
              --tag neondatabase/neon:${DOCKER_TAG} --tag neondatabase/neon:latest .
            docker push neondatabase/neon:${DOCKER_TAG}
            docker push neondatabase/neon:latest

  # Build neondatabase/compute-node:latest image and push it to Docker hub
  docker-image-compute:
    docker:
      - image: cimg/base:2021.04
    steps:
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - run:
          name: Build and push compute-tools Docker image
          command: |
            echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
            docker build \
              --build-arg AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}" \
              --build-arg AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}" \
              --tag neondatabase/compute-tools:local \
              --tag neondatabase/compute-tools:latest \
              -f Dockerfile.compute-tools .
            # Only push :latest image
            docker push neondatabase/compute-tools:latest
      - run:
          name: Init postgres submodule
          command: git submodule update --init --depth 1
      - run:
          name: Build and push compute-node Docker image
          command: |
            echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
            DOCKER_TAG=$(git log --oneline|wc -l)
            docker build --tag neondatabase/compute-node:${DOCKER_TAG} \
              --tag neondatabase/compute-node:latest vendor/postgres \
              --build-arg COMPUTE_TOOLS_TAG=local
            docker push neondatabase/compute-node:${DOCKER_TAG}
            docker push neondatabase/compute-node:latest

  # Build production neondatabase/neon:release image and push it to Docker hub
  docker-image-release:
    docker:
      - image: cimg/base:2021.04
    steps:
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - run:
          name: Init postgres submodule
          command: git submodule update --init --depth 1
      - run:
          name: Build and push Docker image
          command: |
            echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
            DOCKER_TAG="release-$(git log --oneline|wc -l)"
            docker build \
              --pull \
              --build-arg GIT_VERSION=${CIRCLE_SHA1} \
              --build-arg AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}" \
              --build-arg AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}" \
              --tag neondatabase/neon:${DOCKER_TAG} --tag neondatabase/neon:release .
            docker push neondatabase/neon:${DOCKER_TAG}
            docker push neondatabase/neon:release

  # Build production neondatabase/compute-node:release image and push it to Docker hub
  docker-image-compute-release:
    docker:
      - image: cimg/base:2021.04
    steps:
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - run:
          name: Build and push compute-tools Docker image
          command: |
            echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
            docker build \
              --build-arg AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}" \
              --build-arg AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}" \
              --tag neondatabase/compute-tools:release \
              --tag neondatabase/compute-tools:local \
              -f Dockerfile.compute-tools .
            # Only push :release image
            docker push neondatabase/compute-tools:release
      - run:
          name: Init postgres submodule
          command: git submodule update --init --depth 1
      - run:
          name: Build and push compute-node Docker image
          command: |
            echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
            DOCKER_TAG="release-$(git log --oneline|wc -l)"
            docker build --tag neondatabase/compute-node:${DOCKER_TAG} \
              --tag neondatabase/compute-node:release vendor/postgres \
              --build-arg COMPUTE_TOOLS_TAG=local
            docker push neondatabase/compute-node:${DOCKER_TAG}
            docker push neondatabase/compute-node:release

  deploy-staging:
    docker:
      - image: cimg/python:3.10
    steps:
      - checkout
      - setup_remote_docker
      - run:
          name: Setup ansible
          command: |
            pip install --progress-bar off --user ansible boto3
      - run:
          name: Redeploy
          command: |
            cd "$(pwd)/.circleci/ansible"

            ./get_binaries.sh

            echo "${TELEPORT_SSH_KEY}" | tr -d '\n'| base64 --decode >ssh-key
            echo "${TELEPORT_SSH_CERT}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub
            chmod 0600 ssh-key
            ssh-add ssh-key
            rm -f ssh-key ssh-key-cert.pub

            ansible-playbook deploy.yaml -i staging.hosts
            rm -f neon_install.tar.gz .neon_current_version

  deploy-staging-proxy:
    docker:
      - image: cimg/base:2021.04
    environment:
      KUBECONFIG: .kubeconfig
    steps:
      - checkout
      - run:
          name: Store kubeconfig file
          command: |
            echo "${STAGING_KUBECONFIG_DATA}" | base64 --decode > ${KUBECONFIG}
            chmod 0600 ${KUBECONFIG}
      - run:
          name: Setup helm v3
          command: |
            curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
            helm repo add neondatabase https://neondatabase.github.io/helm-charts
      - run:
          name: Re-deploy proxy
          command: |
            DOCKER_TAG=$(git log --oneline|wc -l)
            helm upgrade neon-proxy neondatabase/neon-proxy --install -f .circleci/helm-values/staging.proxy.yaml --set image.tag=${DOCKER_TAG} --wait
            helm upgrade neon-proxy-scram neondatabase/neon-proxy --install -f .circleci/helm-values/staging.proxy-scram.yaml --set image.tag=${DOCKER_TAG} --wait

  deploy-neon-stress:
    docker:
      - image: cimg/python:3.10
    steps:
      - checkout
      - setup_remote_docker
      - run:
          name: Setup ansible
          command: |
            pip install --progress-bar off --user ansible boto3
      - run:
          name: Redeploy
          command: |
            cd "$(pwd)/.circleci/ansible"

            ./get_binaries.sh

            echo "${TELEPORT_SSH_KEY}" | tr -d '\n'| base64 --decode >ssh-key
            echo "${TELEPORT_SSH_CERT}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub
            chmod 0600 ssh-key
            ssh-add ssh-key
            rm -f ssh-key ssh-key-cert.pub

            ansible-playbook deploy.yaml -i neon-stress.hosts
            rm -f neon_install.tar.gz .neon_current_version

  deploy-neon-stress-proxy:
    docker:
      - image: cimg/base:2021.04
    environment:
      KUBECONFIG: .kubeconfig
    steps:
      - checkout
      - run:
          name: Store kubeconfig file
          command: |
            echo "${NEON_STRESS_KUBECONFIG_DATA}" | base64 --decode > ${KUBECONFIG}
            chmod 0600 ${KUBECONFIG}
      - run:
          name: Setup helm v3
          command: |
            curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
            helm repo add neondatabase https://neondatabase.github.io/helm-charts
      - run:
          name: Re-deploy proxy
          command: |
            DOCKER_TAG=$(git log --oneline|wc -l)
            helm upgrade neon-stress-proxy neondatabase/neon-proxy --install -f .circleci/helm-values/neon-stress.proxy.yaml --set image.tag=${DOCKER_TAG} --wait
            helm upgrade neon-stress-proxy-scram neondatabase/neon-proxy --install -f .circleci/helm-values/neon-stress.proxy-scram.yaml --set image.tag=${DOCKER_TAG} --wait

  deploy-release:
    docker:
      - image: cimg/python:3.10
    steps:
      - checkout
      - setup_remote_docker
      - run:
          name: Setup ansible
          command: |
            pip install --progress-bar off --user ansible boto3
      - run:
          name: Redeploy
          command: |
            cd "$(pwd)/.circleci/ansible"

            RELEASE=true ./get_binaries.sh

            echo "${TELEPORT_SSH_KEY}" | tr -d '\n'| base64 --decode >ssh-key
            echo "${TELEPORT_SSH_CERT}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub
            chmod 0600 ssh-key
            ssh-add ssh-key
            rm -f ssh-key ssh-key-cert.pub

            ansible-playbook deploy.yaml -i production.hosts
            rm -f neon_install.tar.gz .neon_current_version

  deploy-release-proxy:
    docker:
      - image: cimg/base:2021.04
    environment:
      KUBECONFIG: .kubeconfig
    steps:
      - checkout
      - run:
          name: Store kubeconfig file
          command: |
            echo "${PRODUCTION_KUBECONFIG_DATA}" | base64 --decode > ${KUBECONFIG}
            chmod 0600 ${KUBECONFIG}
      - run:
          name: Setup helm v3
          command: |
            curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
            helm repo add neondatabase https://neondatabase.github.io/helm-charts
      - run:
          name: Re-deploy proxy
          command: |
            DOCKER_TAG="release-$(git log --oneline|wc -l)"
            helm upgrade neon-proxy neondatabase/neon-proxy --install -f .circleci/helm-values/production.proxy.yaml --set image.tag=${DOCKER_TAG} --wait
            helm upgrade neon-proxy-scram neondatabase/neon-proxy --install -f .circleci/helm-values/production.proxy-scram.yaml --set image.tag=${DOCKER_TAG} --wait

workflows:
  build_and_test:
    jobs:
      - check-codestyle-python
      - build-postgres:
          name: build-postgres-<< matrix.build_type >>
          matrix:
            parameters:
              build_type: ["debug", "release"]
      - build-neon:
          name: build-neon-<< matrix.build_type >>
          matrix:
            parameters:
              build_type: ["debug", "release"]
          requires:
            - build-postgres-<< matrix.build_type >>
      - run-pytest:
          name: pg_regress-tests-<< matrix.build_type >>
          matrix:
            parameters:
              build_type: ["debug", "release"]
          test_selection: batch_pg_regress
          needs_postgres_source: true
          requires:
            - build-neon-<< matrix.build_type >>
      - run-pytest:
          name: other-tests-<< matrix.build_type >>
          matrix:
            parameters:
              build_type: ["debug", "release"]
          test_selection: batch_others
          requires:
            - build-neon-<< matrix.build_type >>
      - run-pytest:
          name: benchmarks
          context: PERF_TEST_RESULT_CONNSTR
          build_type: release
          test_selection: performance
          run_in_parallel: false
          save_perf_report: true
          requires:
            - build-neon-release
      - docker-image:
          # Context gives an ability to login
          context: Docker Hub
          # Build image only for commits to main
          filters:
            branches:
              only:
                - main
          requires:
            - pg_regress-tests-release
            - other-tests-release
      - docker-image-compute:
          # Context gives an ability to login
          context: Docker Hub
          # Build image only for commits to main
          filters:
            branches:
              only:
                - main
          requires:
            - pg_regress-tests-release
            - other-tests-release
      - deploy-staging:
          # Context gives an ability to login
          context: Docker Hub
          # deploy only for commits to main
          filters:
            branches:
              only:
                - main
          requires:
            - docker-image
      - deploy-staging-proxy:
          # deploy only for commits to main
          filters:
            branches:
              only:
                - main
          requires:
            - docker-image

      - deploy-neon-stress:
          # Context gives an ability to login
          context: Docker Hub
          # deploy only for commits to main
          filters:
            branches:
              only:
                - main
          requires:
            - docker-image
      - deploy-neon-stress-proxy:
          # deploy only for commits to main
          filters:
            branches:
              only:
                - main
          requires:
            - docker-image

      - docker-image-release:
          # Context gives an ability to login
          context: Docker Hub
          # Build image only for commits to the release branch
          filters:
            branches:
              only:
                - release
          requires:
            - pg_regress-tests-release
            - other-tests-release
      - docker-image-compute-release:
          # Context gives an ability to login
          context: Docker Hub
          # Build image only for commits to the release branch
          filters:
            branches:
              only:
                - release
          requires:
            - pg_regress-tests-release
            - other-tests-release
      - deploy-release:
          # Context gives an ability to login
          context: Docker Hub
          # deploy only for commits to the release branch
          filters:
            branches:
              only:
                - release
          requires:
            - docker-image-release
      - deploy-release-proxy:
          # deploy only for commits to the release branch
          filters:
            branches:
              only:
                - release
          requires:
            - docker-image-release
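Changes to a config like the one above can be checked locally before pushing; a sketch, assuming the CircleCI CLI is installed:

    # schema check only
    circleci config validate .circleci/config.yml
    # expand the matrix jobs and parameters to the effective config
    circleci config process .circleci/config.yml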
@@ -1,18 +1,21 @@
-**/.git/
+*
 **/__pycache__
 **/.pytest_cache
-
-.git
-target
-tmp_check
-tmp_install
-tmp_check_cli
-test_output
-.vscode
-.neon
-integration_tests/.neon
-.mypy_cache
-
-Dockerfile
-.dockerignore
-
+!rust-toolchain.toml
+!Cargo.toml
+!Cargo.lock
+!Makefile
+!.cargo/
+!.config/
+!control_plane/
+!compute_tools/
+!libs/
+!pageserver/
+!pgxn/
+!proxy/
+!safekeeper/
+!vendor/postgres-v14/
+!vendor/postgres-v15/
+!workspace_hack/
+!neon_local/
+!scripts/ninstall.sh
.git-blame-ignore-revs (new file, 1 line)
@@ -0,0 +1 @@
4c2bb43775947775401cbb9d774823c5723a91f8
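git does not read this file automatically; it has to be wired into blame explicitly (standard git >= 2.23 behaviour, not part of this diff):

    # point blame at the ignore list once per clone
    git config blame.ignoreRevsFile .git-blame-ignore-revs
    # or pass it per invocation
    git blame --ignore-revs-file .git-blame-ignore-revs <file>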
.github/ISSUE_TEMPLATE/bug-template.md (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
---
name: Bug Template
about: Used for describing bugs
title: ''
labels: t/bug
assignees: ''

---

## Steps to reproduce


## Expected result


## Actual result


## Environment


## Logs, links
-
.github/ISSUE_TEMPLATE/epic-template.md (vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
---
name: Epic Template
about: A set of related tasks contributing towards a specific outcome, comprising
  more than 1 week of work.
title: 'Epic: '
labels: t/Epic
assignees: ''

---

## Motivation


## DoD


## Implementation ideas


## Tasks
- [ ]


## Other related tasks and Epics
-
.github/PULL_REQUEST_TEMPLATE/release-pr.md (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
## Release 202Y-MM-DD

**NB: this PR must be merged only by 'Create a merge commit'!**

### Checklist when preparing for release
- [ ] Read or refresh [the release flow guide](https://github.com/neondatabase/cloud/wiki/Release:-general-flow)
- [ ] Ask in the [cloud Slack channel](https://neondb.slack.com/archives/C033A2WE6BZ) that you are going to roll out the release. Any blockers?
- [ ] Does this release contain any db migrations? Destructive ones? What is the rollback plan?

<!-- List everything that should be done **before** release, any issues / setting changes / etc -->

### Checklist after release
- [ ] Based on the merged commits, write release notes and open a PR into the `website` repo ([example](https://github.com/neondatabase/website/pull/219/files))
- [ ] Check the [#dev-production-stream](https://neondb.slack.com/archives/C03F5SM1N02) Slack channel
- [ ] Check the [stuck projects page](https://console.neon.tech/admin/projects?sort=last_active&order=desc&stuck=true)
- [ ] Check [recent operation failures](https://console.neon.tech/admin/operations?action=create_timeline%2Cstart_compute%2Cstop_compute%2Csuspend_compute%2Capply_config%2Cdelete_timeline%2Cdelete_tenant%2Ccreate_branch%2Ccheck_availability&sort=updated_at&order=desc&had_retries=some)
- [ ] Check the [cloud SLO dashboard](https://observer.zenith.tech/d/_oWcBMJ7k/cloud-slos?orgId=1)
- [ ] Check the [compute startup metrics dashboard](https://observer.zenith.tech/d/5OkYJEmVz/compute-startup-time)

<!-- List everything that should be done **after** release, any admin UI configuration / Grafana dashboard / alert changes / setting changes / etc -->
.github/actions/allure-report/action.yml (vendored, new file, 221 lines)
@@ -0,0 +1,221 @@
name: 'Create Allure report'
description: 'Create and publish Allure report'

inputs:
  action:
    description: 'generate or store'
    required: true
  build_type:
    description: '`build_type` from run-python-test-set action'
    required: true
  test_selection:
    description: '`test_selector` from run-python-test-set action'
    required: false
outputs:
  report-url:
    description: 'Allure report URL'
    value: ${{ steps.generate-report.outputs.report-url }}

runs:
  using: "composite"
  steps:
    - name: Validate input parameters
      shell: bash -euxo pipefail {0}
      run: |
        if [ "${{ inputs.action }}" != "store" ] && [ "${{ inputs.action }}" != "generate" ]; then
          echo 2>&1 "Unknown inputs.action type '${{ inputs.action }}'; allowed 'generate' or 'store' only"
          exit 1
        fi

        if [ -z "${{ inputs.test_selection }}" ] && [ "${{ inputs.action }}" == "store" ]; then
          echo 2>&1 "inputs.test_selection must be set for 'store' action"
          exit 2
        fi

    - name: Calculate key
      id: calculate-key
      shell: bash -euxo pipefail {0}
      run: |
        # TODO: for manually triggered workflows (via workflow_dispatch) we need to have a separate key

        pr_number=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
        if [ "${pr_number}" != "null" ]; then
          key=pr-${pr_number}
        elif [ "${GITHUB_REF}" = "refs/heads/main" ]; then
          # Shortcut for a special branch
          key=main
        else
          key=branch-$(echo ${GITHUB_REF#refs/heads/} | tr -c "[:alnum:]._-" "-")
        fi
        echo "KEY=${key}" >> $GITHUB_OUTPUT

    - uses: actions/setup-java@v3
      if: ${{ inputs.action == 'generate' }}
      with:
        distribution: 'temurin'
        java-version: '17'

    - name: Install Allure
      if: ${{ inputs.action == 'generate' }}
      shell: bash -euxo pipefail {0}
      run: |
        if ! which allure; then
          ALLURE_ZIP=allure-${ALLURE_VERSION}.zip
          wget -q https://github.com/allure-framework/allure2/releases/download/${ALLURE_VERSION}/${ALLURE_ZIP}
          echo "${ALLURE_ZIP_MD5} ${ALLURE_ZIP}" | md5sum -c
          unzip -q ${ALLURE_ZIP}
          echo "$(pwd)/allure-${ALLURE_VERSION}/bin" >> $GITHUB_PATH
          rm -f ${ALLURE_ZIP}
        fi
      env:
        ALLURE_VERSION: 2.19.0
        ALLURE_ZIP_MD5: ced21401a1a8b9dfb68cee9e4c210464

    - name: Upload Allure results
      if: ${{ inputs.action == 'store' }}
      env:
        REPORT_PREFIX: reports/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
        RAW_PREFIX: reports-raw/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
        TEST_OUTPUT: /tmp/test_output
        BUCKET: neon-github-public-dev
      shell: bash -euxo pipefail {0}
      run: |
        # Add metadata
        cat <<EOF > $TEST_OUTPUT/allure/results/executor.json
        {
          "name": "GitHub Actions",
          "type": "github",
          "url": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/latest/index.html",
          "buildOrder": ${GITHUB_RUN_ID},
          "buildName": "GitHub Actions Run #${{ github.run_number }}/${GITHUB_RUN_ATTEMPT}",
          "buildUrl": "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/attempts/${GITHUB_RUN_ATTEMPT}",
          "reportUrl": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}/index.html",
          "reportName": "Allure Report"
        }
        EOF
        cat <<EOF > $TEST_OUTPUT/allure/results/environment.properties
        TEST_SELECTION=${{ inputs.test_selection }}
        BUILD_TYPE=${{ inputs.build_type }}
        EOF

        ARCHIVE="${GITHUB_RUN_ID}-${{ inputs.test_selection }}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
        ZSTD_NBTHREADS=0

        tar -C ${TEST_OUTPUT}/allure/results -cf ${ARCHIVE} --zstd .
        aws s3 mv --only-show-errors ${ARCHIVE} "s3://${BUCKET}/${RAW_PREFIX}/${ARCHIVE}"

    # Potentially we could have several running builds for the same key (for example for the main branch), so we use an improvised lock for this
    - name: Acquire Allure lock
      if: ${{ inputs.action == 'generate' }}
      shell: bash -euxo pipefail {0}
      env:
        LOCK_FILE: reports/${{ steps.calculate-key.outputs.KEY }}/lock.txt
        BUCKET: neon-github-public-dev
      run: |
        LOCK_TIMEOUT=300 # seconds

        for _ in $(seq 1 5); do
          for i in $(seq 1 ${LOCK_TIMEOUT}); do
            LOCK_ADDED=$(aws s3api head-object --bucket neon-github-public-dev --key ${LOCK_FILE} | jq --raw-output '.LastModified' || true)
            # `date --date="..."` is supported only by gnu date (i.e. it doesn't work on BSD/macOS)
            if [ -z "${LOCK_ADDED}" ] || [ "$(( $(date +%s) - $(date --date="${LOCK_ADDED}" +%s) ))" -gt "${LOCK_TIMEOUT}" ]; then
              break
            fi
            sleep 1
          done
          echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" > lock.txt
          aws s3 mv --only-show-errors lock.txt "s3://${BUCKET}/${LOCK_FILE}"

          # A double-check that exactly WE have acquired the lock
          aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
          if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" ]; then
            break
          fi
        done

    - name: Generate and publish final Allure report
      if: ${{ inputs.action == 'generate' }}
      id: generate-report
      env:
        REPORT_PREFIX: reports/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
        RAW_PREFIX: reports-raw/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
        TEST_OUTPUT: /tmp/test_output
        BUCKET: neon-github-public-dev
      shell: bash -euxo pipefail {0}
      run: |
        # Get previously uploaded data for this run
        ZSTD_NBTHREADS=0

        s3_filepaths=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${RAW_PREFIX}/${GITHUB_RUN_ID}- | jq --raw-output '.Contents[].Key')
        if [ -z "$s3_filepaths" ]; then
          # There's no previously uploaded data for this run
          exit 0
        fi
        for s3_filepath in ${s3_filepaths}; do
          aws s3 cp --only-show-errors "s3://${BUCKET}/${s3_filepath}" "${TEST_OUTPUT}/allure/"

          archive=${TEST_OUTPUT}/allure/$(basename $s3_filepath)
          mkdir -p ${archive%.tar.zst}
          tar -xf ${archive} -C ${archive%.tar.zst}
          rm -f ${archive}
        done

        # Get history trend
        aws s3 cp --recursive --only-show-errors "s3://${BUCKET}/${REPORT_PREFIX}/latest/history" "${TEST_OUTPUT}/allure/latest/history" || true

        # Generate report
        allure generate --clean --output $TEST_OUTPUT/allure/report $TEST_OUTPUT/allure/*

        # Replace a logo link with a redirect to the latest version of the report
        sed -i 's|<a href="." class=|<a href="https://'${BUCKET}'.s3.amazonaws.com/'${REPORT_PREFIX}'/latest/index.html" class=|g' $TEST_OUTPUT/allure/report/app.js

        # Upload a history and the final report (in this particular order to not have duplicated history in 2 places)
|
||||||
|
aws s3 mv --recursive --only-show-errors "${TEST_OUTPUT}/allure/report/history" "s3://${BUCKET}/${REPORT_PREFIX}/latest/history"
|
||||||
|
aws s3 mv --recursive --only-show-errors "${TEST_OUTPUT}/allure/report" "s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}"
|
||||||
|
|
||||||
|
REPORT_URL=https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}/index.html
|
||||||
|
|
||||||
|
# Generate redirect
|
||||||
|
cat <<EOF > ./index.html
|
||||||
|
<!DOCTYPE html>
|
||||||
|
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<title>Redirecting to ${REPORT_URL}</title>
|
||||||
|
<meta http-equiv="refresh" content="0; URL=${REPORT_URL}">
|
||||||
|
EOF
|
||||||
|
aws s3 cp --only-show-errors ./index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"
|
||||||
|
|
||||||
|
echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}
|
||||||
|
echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Release Allure lock
|
||||||
|
if: ${{ inputs.action == 'generate' && always() }}
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
env:
|
||||||
|
LOCK_FILE: reports/${{ steps.calculate-key.outputs.KEY }}/lock.txt
|
||||||
|
BUCKET: neon-github-public-dev
|
||||||
|
run: |
|
||||||
|
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0
|
||||||
|
|
||||||
|
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" ]; then
|
||||||
|
aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
- uses: actions/github-script@v6
|
||||||
|
if: ${{ inputs.action == 'generate' && always() }}
|
||||||
|
env:
|
||||||
|
REPORT_URL: ${{ steps.generate-report.outputs.report-url }}
|
||||||
|
BUILD_TYPE: ${{ inputs.build_type }}
|
||||||
|
SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
const { REPORT_URL, BUILD_TYPE, SHA } = process.env
|
||||||
|
|
||||||
|
await github.rest.repos.createCommitStatus({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
sha: `${SHA}`,
|
||||||
|
state: 'success',
|
||||||
|
target_url: `${REPORT_URL}`,
|
||||||
|
context: `Allure report / ${BUILD_TYPE}`,
|
||||||
|
})
|
||||||
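For reference, a minimal sketch of how a workflow job might call this composite allure-report action in 'generate' mode; the step name, the always() condition and the 'release' value are illustrative assumptions, only the action and build_type inputs come from the action above:

    - name: Create Allure report
      if: ${{ always() }}                    # illustrative condition, not taken from this diff
      uses: ./.github/actions/allure-report
      with:
        action: generate                     # 'store' would also require test_selection
        build_type: release                  # illustrative value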
59 .github/actions/download/action.yml vendored Normal file
@@ -0,0 +1,59 @@
name: "Download an artifact"
description: "Custom download action"
inputs:
name:
description: "Artifact name"
required: true
path:
description: "A directory to put artifact into"
default: "."
required: false
skip-if-does-not-exist:
description: "Allow to skip if file doesn't exist, fail otherwise"
default: false
required: false
prefix:
description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
required: false

runs:
using: "composite"
steps:
- name: Download artifact
id: download-artifact
shell: bash -euxo pipefail {0}
env:
TARGET: ${{ inputs.path }}
ARCHIVE: /tmp/downloads/${{ inputs.name }}.tar.zst
SKIP_IF_DOES_NOT_EXIST: ${{ inputs.skip-if-does-not-exist }}
PREFIX: artifacts/${{ inputs.prefix || format('{0}/{1}', github.run_id, github.run_attempt) }}
run: |
BUCKET=neon-github-public-dev
FILENAME=$(basename $ARCHIVE)

S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX%$GITHUB_RUN_ATTEMPT} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
if [ -z "${S3_KEY}" ]; then
if [ "${SKIP_IF_DOES_NOT_EXIST}" = "true" ]; then
echo 'SKIPPED=true' >> $GITHUB_OUTPUT
exit 0
else
echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
exit 1
fi
fi

echo 'SKIPPED=false' >> $GITHUB_OUTPUT

mkdir -p $(dirname $ARCHIVE)
time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} ${ARCHIVE}

- name: Extract artifact
if: ${{ steps.download-artifact.outputs.SKIPPED == 'false' }}
shell: bash -euxo pipefail {0}
env:
TARGET: ${{ inputs.path }}
ARCHIVE: /tmp/downloads/${{ inputs.name }}.tar.zst
run: |
mkdir -p ${TARGET}
time tar -xf ${ARCHIVE} -C ${TARGET}
rm -f ${ARCHIVE}
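As a reading aid, this is roughly how the download action is consumed elsewhere in this same diff (see the run-python-test-set changes below); 'prefix: latest' is optional and falls back to the per-run '<run id>/<run attempt>' prefix:

    - name: Get Neon artifact
      uses: ./.github/actions/download
      with:
        name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
        path: /tmp/neon
        prefix: latest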
82 .github/actions/neon-project-create/action.yml vendored Normal file
@@ -0,0 +1,82 @@
name: 'Create Neon Project'
description: 'Create Neon Project using API'

inputs:
api_key:
description: 'Neon API key'
required: true
environment:
description: 'dev (aka captest) or stage'
required: true
region_id:
description: 'Region ID, if not set the project will be created in the default region'
required: false
outputs:
dsn:
description: 'Created Project DSN (for main database)'
value: ${{ steps.create-neon-project.outputs.dsn }}
project_id:
description: 'Created Project ID'
value: ${{ steps.create-neon-project.outputs.project_id }}

runs:
using: "composite"
steps:
- name: Parse Input
id: parse-input
shell: bash -euxo pipefail {0}
run: |
case "${ENVIRONMENT}" in
dev)
API_HOST=console.dev.neon.tech
REGION_ID=${REGION_ID:-eu-west-1}
;;
staging)
API_HOST=console.stage.neon.tech
REGION_ID=${REGION_ID:-us-east-1}
;;
*)
echo 2>&1 "Unknown environment=${ENVIRONMENT}. Allowed 'dev' or 'staging' only"
exit 1
;;
esac

echo "api_host=${API_HOST}" >> $GITHUB_OUTPUT
echo "region_id=${REGION_ID}" >> $GITHUB_OUTPUT
env:
ENVIRONMENT: ${{ inputs.environment }}
REGION_ID: ${{ inputs.region_id }}

- name: Create Neon Project
id: create-neon-project
# A shell without `set -x` so as not to expose password/dsn in logs
shell: bash -euo pipefail {0}
run: |
project=$(curl \
"https://${API_HOST}/api/v1/projects" \
--fail \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--header "Authorization: Bearer ${API_KEY}" \
--data "{
\"project\": {
\"name\": \"Created by actions/neon-project-create; GITHUB_RUN_ID=${GITHUB_RUN_ID}\",
\"platform_id\": \"aws\",
\"region_id\": \"${REGION_ID}\",
\"settings\": { }
}
}")

# Mask password
echo "::add-mask::$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .password')"

dsn=$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .dsn')/main
echo "::add-mask::${dsn}"
echo "dsn=${dsn}" >> $GITHUB_OUTPUT

project_id=$(echo $project | jq --raw-output '.id')
echo "project_id=${project_id}" >> $GITHUB_OUTPUT
env:
API_KEY: ${{ inputs.api_key }}
API_HOST: ${{ steps.parse-input.outputs.api_host }}
REGION_ID: ${{ steps.parse-input.outputs.region_id }}
54 .github/actions/neon-project-delete/action.yml vendored Normal file
@@ -0,0 +1,54 @@
name: 'Delete Neon Project'
description: 'Delete Neon Project using API'

inputs:
api_key:
description: 'Neon API key'
required: true
environment:
description: 'dev (aka captest) or stage'
required: true
project_id:
description: 'ID of the Project to delete'
required: true

runs:
using: "composite"
steps:
- name: Parse Input
id: parse-input
shell: bash -euxo pipefail {0}
run: |
case "${ENVIRONMENT}" in
dev)
API_HOST=console.dev.neon.tech
;;
staging)
API_HOST=console.stage.neon.tech
;;
*)
echo 2>&1 "Unknown environment=${ENVIRONMENT}. Allowed 'dev' or 'staging' only"
exit 1
;;
esac

echo "api_host=${API_HOST}" >> $GITHUB_OUTPUT
env:
ENVIRONMENT: ${{ inputs.environment }}

- name: Delete Neon Project
shell: bash -euxo pipefail {0}
run: |
# Allow PROJECT_ID to be empty/null for cases when .github/actions/neon-project-create failed
if [ -n "${PROJECT_ID}" ]; then
curl -X "POST" \
"https://${API_HOST}/api/v1/projects/${PROJECT_ID}/delete" \
--fail \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--header "Authorization: Bearer ${API_KEY}"
fi
env:
API_KEY: ${{ inputs.api_key }}
PROJECT_ID: ${{ inputs.project_id }}
API_HOST: ${{ steps.parse-input.outputs.api_host }}
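A hedged sketch of how the create/delete pair above might be wired together in a job; the step names, the 'staging' environment value and the NEON_API_KEY secret name are assumptions for illustration, while the inputs and outputs are the ones declared by the two actions:

    - name: Create Neon Project
      id: create-neon-project
      uses: ./.github/actions/neon-project-create
      with:
        api_key: ${{ secrets.NEON_API_KEY }}   # assumed secret name
        environment: staging

    # ... run tests against ${{ steps.create-neon-project.outputs.dsn }} ...

    - name: Delete Neon Project
      if: ${{ always() }}
      uses: ./.github/actions/neon-project-delete
      with:
        api_key: ${{ secrets.NEON_API_KEY }}   # assumed secret name
        environment: staging
        project_id: ${{ steps.create-neon-project.outputs.project_id }}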
154 .github/actions/run-python-test-set/action.yml vendored
@@ -3,10 +3,7 @@ description: 'Runs a Neon python test set, performing all the required preparati

inputs:
build_type:
- description: 'Type of Rust (neon) and C (postgres) builds. Must be "release" or "debug".'
+ description: 'Type of Rust (neon) and C (postgres) builds. Must be "release" or "debug", or "remote" for the remote cluster'
- required: true
- rust_toolchain:
- description: 'Rust toolchain version to fetch the caches'
required: true
test_selection:
description: 'A python test suite to run'
@@ -24,25 +21,55 @@ inputs:
required: false
default: 'true'
save_perf_report:
- description: 'Whether to upload the performance report'
+ description: 'Whether to upload the performance report, if true PERF_TEST_RESULT_CONNSTR env variable should be set'
required: false
default: 'false'
+ run_with_real_s3:
+ description: 'Whether to pass real s3 credentials to the test suite'
+ required: false
+ default: 'false'
+ real_s3_bucket:
+ description: 'Bucket name for real s3 tests'
+ required: false
+ default: ''
+ real_s3_region:
+ description: 'Region name for real s3 tests'
+ required: false
+ default: ''
+ real_s3_access_key_id:
+ description: 'Access key id'
+ required: false
+ default: ''
+ real_s3_secret_access_key:
+ description: 'Secret access key'
+ required: false
+ default: ''

runs:
using: "composite"
steps:
- - name: Get Neon artifact for restoration
+ - name: Get Neon artifact
- uses: actions/download-artifact@v3
+ if: inputs.build_type != 'remote'
+ uses: ./.github/actions/download
with:
- name: neon-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.rust_toolchain }}-artifact
+ name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
- path: ./neon-artifact/
+ path: /tmp/neon

- - name: Extract Neon artifact
+ - name: Download Neon binaries for the previous release
- shell: bash -ex {0}
+ if: inputs.build_type != 'remote'
- run: |
+ uses: ./.github/actions/download
- mkdir -p /tmp/neon/
+ with:
- tar -xf ./neon-artifact/neon.tgz -C /tmp/neon/
+ name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
- rm -rf ./neon-artifact/
+ path: /tmp/neon-previous
+ prefix: latest

+ - name: Download compatibility snapshot for Postgres 14
+ if: inputs.build_type != 'remote'
+ uses: ./.github/actions/download
+ with:
+ name: compatibility-snapshot-${{ inputs.build_type }}-pg14
+ path: /tmp/compatibility_snapshot_pg14
+ prefix: latest

- name: Checkout
if: inputs.needs_postgres_source == 'true'
@@ -59,19 +86,33 @@ runs:
key: v1-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }}

- name: Install Python deps
- shell: bash -ex {0}
+ shell: bash -euxo pipefail {0}
run: ./scripts/pysync

- name: Run pytest
env:
NEON_BIN: /tmp/neon/bin
- POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+ COMPATIBILITY_NEON_BIN: /tmp/neon-previous/bin
+ COMPATIBILITY_POSTGRES_DISTRIB_DIR: /tmp/neon-previous/pg_install
TEST_OUTPUT: /tmp/test_output
- # this variable will be embedded in perf test report
+ BUILD_TYPE: ${{ inputs.build_type }}
- # and is needed to distinguish different environments
+ AWS_ACCESS_KEY_ID: ${{ inputs.real_s3_access_key_id }}
- PLATFORM: github-actions-selfhosted
+ AWS_SECRET_ACCESS_KEY: ${{ inputs.real_s3_secret_access_key }}
- shell: bash -ex {0}
+ COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg14
+ ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'backward compatibility breakage')
+ ALLOW_FORWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'forward compatibility breakage')
+ shell: bash -euxo pipefail {0}
run: |
+ # PLATFORM will be embedded in the perf test report
+ # and it is needed to distinguish different environments
+ export PLATFORM=${PLATFORM:-github-actions-selfhosted}
+ export POSTGRES_DISTRIB_DIR=${POSTGRES_DISTRIB_DIR:-/tmp/neon/pg_install}
+ export DEFAULT_PG_VERSION=${DEFAULT_PG_VERSION:-14}

+ if [ "${BUILD_TYPE}" = "remote" ]; then
+ export REMOTE_ENV=1
+ fi

PERF_REPORT_DIR="$(realpath test_runner/perf-report-local)"
rm -rf $PERF_REPORT_DIR

@@ -82,59 +123,76 @@ runs:
exit 1
fi
if [[ "${{ inputs.run_in_parallel }}" == "true" ]]; then
+ # -n4 uses four processes to run tests via pytest-xdist
EXTRA_PARAMS="-n4 $EXTRA_PARAMS"

+ # --dist=loadgroup points tests marked with @pytest.mark.xdist_group
+ # to the same worker to make @pytest.mark.order work with xdist
+ EXTRA_PARAMS="--dist=loadgroup $EXTRA_PARAMS"
fi

+ if [[ "${{ inputs.run_with_real_s3 }}" == "true" ]]; then
+ echo "REAL S3 ENABLED"
+ export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty
+ export REMOTE_STORAGE_S3_BUCKET=${{ inputs.real_s3_bucket }}
+ export REMOTE_STORAGE_S3_REGION=${{ inputs.real_s3_region }}
+ fi

if [[ "${{ inputs.save_perf_report }}" == "true" ]]; then
- if [[ "$GITHUB_REF" == "main" ]]; then
+ mkdir -p "$PERF_REPORT_DIR"
- mkdir -p "$PERF_REPORT_DIR"
+ EXTRA_PARAMS="--out-dir $PERF_REPORT_DIR $EXTRA_PARAMS"
- EXTRA_PARAMS="--out-dir $PERF_REPORT_DIR $EXTRA_PARAMS"
- fi
fi

if [[ "${{ inputs.build_type }}" == "debug" ]]; then
cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run)
elif [[ "${{ inputs.build_type }}" == "release" ]]; then
cov_prefix=()
+ else
+ cov_prefix=()
+ fi

+ # Wake up the cluster if we use remote neon instance
+ if [ "${{ inputs.build_type }}" = "remote" ] && [ -n "${BENCHMARK_CONNSTR}" ]; then
+ ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/psql ${BENCHMARK_CONNSTR} -c "SELECT version();"
fi

# Run the tests.
#
- # The junit.xml file allows CircleCI to display more fine-grained test information
+ # The junit.xml file allows CI tools to display more fine-grained test information
# in its "Tests" tab in the results page.
# --verbose prints name of each test (helpful when there are
# multiple tests in one file)
# -rA prints summary in the end
- # -n4 uses four processes to run tests via pytest-xdist
# -s is not used to prevent pytest from capturing output, because tests are running
# in parallel and logs are mixed between different tests
+ #
+ mkdir -p $TEST_OUTPUT/allure/results
"${cov_prefix[@]}" ./scripts/pytest \
--junitxml=$TEST_OUTPUT/junit.xml \
+ --alluredir=$TEST_OUTPUT/allure/results \
--tb=short \
--verbose \
- -m "not remote_cluster" \
-rA $TEST_SELECTION $EXTRA_PARAMS

if [[ "${{ inputs.save_perf_report }}" == "true" ]]; then
- if [[ "$GITHUB_REF" == "main" ]]; then
+ export REPORT_FROM="$PERF_REPORT_DIR"
- export REPORT_FROM="$PERF_REPORT_DIR"
+ export REPORT_TO="$PLATFORM"
- export REPORT_TO=local
+ scripts/generate_and_push_perf_report.sh
- scripts/generate_and_push_perf_report.sh
- fi
fi

- - name: Delete all data but logs
+ - name: Upload compatibility snapshot for Postgres 14
- shell: bash -ex {0}
+ if: github.ref_name == 'release'
- if: always()
+ uses: ./.github/actions/upload
- run: |
- du -sh /tmp/test_output/*
- find /tmp/test_output -type f ! -name "*.log" ! -name "regression.diffs" ! -name "junit.xml" ! -name "*.filediff" ! -name "*.stdout" ! -name "*.stderr" ! -name "flamegraph.svg" ! -name "*.metrics" -delete
- du -sh /tmp/test_output/*

- - name: Upload python test logs
- if: always()
- uses: actions/upload-artifact@v3
with:
- retention-days: 7
+ name: compatibility-snapshot-${{ inputs.build_type }}-pg14-${{ github.run_id }}
- if-no-files-found: error
+ # The path includes a test name (test_create_snapshot) and directory that the test creates (compatibility_snapshot_pg14), keep the path in sync with the test
- name: python-test-${{ inputs.test_selection }}-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.rust_toolchain }}-logs
+ path: /tmp/test_output/test_create_snapshot/compatibility_snapshot_pg14/
- path: /tmp/test_output/
+ prefix: latest

+ - name: Create Allure report
+ if: success() || failure()
+ uses: ./.github/actions/allure-report
+ with:
+ action: store
+ build_type: ${{ inputs.build_type }}
+ test_selection: ${{ inputs.test_selection }}
17 .github/actions/save-coverage-data/action.yml vendored
@@ -5,13 +5,18 @@ runs:
using: "composite"
steps:
- name: Merge coverage data
- shell: bash -ex {0}
+ shell: bash -euxo pipefail {0}
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge

- - name: Upload coverage data
+ - name: Download previous coverage data into the same directory
- uses: actions/upload-artifact@v3
+ uses: ./.github/actions/download
with:
- retention-days: 7
- if-no-files-found: error
name: coverage-data-artifact
- path: /tmp/coverage/
+ path: /tmp/coverage
+ skip-if-does-not-exist: true # skip if there's no previous coverage to download

+ - name: Upload coverage data
+ uses: ./.github/actions/upload
+ with:
+ name: coverage-data-artifact
+ path: /tmp/coverage
58 .github/actions/upload/action.yml vendored Normal file
@@ -0,0 +1,58 @@
name: "Upload an artifact"
description: "Custom upload action"
inputs:
name:
description: "Artifact name"
required: true
path:
description: "A directory or file to upload"
required: true
prefix:
description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
required: false

runs:
using: "composite"
steps:
- name: Prepare artifact
shell: bash -euxo pipefail {0}
env:
SOURCE: ${{ inputs.path }}
ARCHIVE: /tmp/uploads/${{ inputs.name }}.tar.zst
run: |
mkdir -p $(dirname $ARCHIVE)

if [ -f ${ARCHIVE} ]; then
echo 2>&1 "File ${ARCHIVE} already exists. Something went wrong before"
exit 1
fi

ZSTD_NBTHREADS=0
if [ -d ${SOURCE} ]; then
time tar -C ${SOURCE} -cf ${ARCHIVE} --zstd .
elif [ -f ${SOURCE} ]; then
time tar -cf ${ARCHIVE} --zstd ${SOURCE}
elif ! ls ${SOURCE} > /dev/null 2>&1; then
echo 2>&1 "${SOURCE} does not exist"
exit 2
else
echo 2>&1 "${SOURCE} is neither a directory nor a file, do not know how to handle it"
exit 3
fi

- name: Upload artifact
shell: bash -euxo pipefail {0}
env:
SOURCE: ${{ inputs.path }}
ARCHIVE: /tmp/uploads/${{ inputs.name }}.tar.zst
PREFIX: artifacts/${{ inputs.prefix || format('{0}/{1}', github.run_id, github.run_attempt) }}
run: |
BUCKET=neon-github-public-dev
FILENAME=$(basename $ARCHIVE)

FILESIZE=$(du -sh ${ARCHIVE} | cut -f1)

time aws s3 mv --only-show-errors ${ARCHIVE} s3://${BUCKET}/${PREFIX}/${FILENAME}

# Ref https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-job-summary
echo "[${FILENAME}](https://${BUCKET}.s3.amazonaws.com/${PREFIX}/${FILENAME}) ${FILESIZE}" >> ${GITHUB_STEP_SUMMARY}
5 .github/ansible/.gitignore vendored Normal file
@@ -0,0 +1,5 @@
neon_install.tar.gz
.neon_current_version

collections/*
!collections/.keep
@@ -3,6 +3,7 @@
localhost_warning = False
host_key_checking = False
timeout = 30
+ collections_paths = ./collections

[ssh_connection]
ssh_args = -F ./ansible.ssh.cfg
0 .github/ansible/collections/.keep vendored Normal file
@@ -1,7 +1,7 @@
- name: Upload Neon binaries
hosts: storage
gather_facts: False
- remote_user: admin
+ remote_user: "{{ remote_user }}"

tasks:

@@ -14,7 +14,8 @@
- safekeeper

- name: inform about versions
- debug: msg="Version to deploy - {{ current_version }}"
+ debug:
+ msg: "Version to deploy - {{ current_version }}"
tags:
- pageserver
- safekeeper
@@ -35,7 +36,7 @@
- name: Deploy pageserver
hosts: pageservers
gather_facts: False
- remote_user: admin
+ remote_user: "{{ remote_user }}"

tasks:

@@ -58,20 +59,34 @@
creates: "/storage/pageserver/data/tenants"
environment:
NEON_REPO_DIR: "/storage/pageserver/data"
- LD_LIBRARY_PATH: "/usr/local/lib"
+ LD_LIBRARY_PATH: "/usr/local/v14/lib"
become: true
tags:
- pageserver

- - name: update remote storage (s3) config
+ - name: read the existing remote pageserver config
- lineinfile:
+ ansible.builtin.slurp:
- path: /storage/pageserver/data/pageserver.toml
+ src: /storage/pageserver/data/pageserver.toml
- line: "{{ item }}"
+ register: _remote_ps_config
- loop:
+ tags:
- - "[remote_storage]"
+ - pageserver
- - "bucket_name = '{{ bucket_name }}'"
- - "bucket_region = '{{ bucket_region }}'"
+ - name: parse the existing pageserver configuration
- - "prefix_in_bucket = '{{ inventory_hostname }}'"
+ ansible.builtin.set_fact:
+ _existing_ps_config: "{{ _remote_ps_config['content'] | b64decode | sivel.toiletwater.from_toml }}"
+ tags:
+ - pageserver

+ - name: construct the final pageserver configuration dict
+ ansible.builtin.set_fact:
+ pageserver_config: "{{ pageserver_config_stub | combine({'id': _existing_ps_config.id }) }}"
+ tags:
+ - pageserver

+ - name: template the pageserver config
+ template:
+ src: templates/pageserver.toml.j2
+ dest: /storage/pageserver/data/pageserver.toml
become: true
tags:
- pageserver
@@ -109,7 +124,7 @@
- name: Deploy safekeeper
hosts: safekeepers
gather_facts: False
- remote_user: admin
+ remote_user: "{{ remote_user }}"

tasks:

@@ -132,7 +147,7 @@
creates: "/storage/safekeeper/data/safekeeper.id"
environment:
NEON_REPO_DIR: "/storage/safekeeper/data"
- LD_LIBRARY_PATH: "/usr/local/lib"
+ LD_LIBRARY_PATH: "/usr/local/v14/lib"
become: true
tags:
- safekeeper
41 .github/ansible/get_binaries.sh vendored Executable file
@@ -0,0 +1,41 @@
#!/bin/bash

set -e

if [ -n "${DOCKER_TAG}" ]; then
# Version is DOCKER_TAG but without prefix
VERSION=$(echo $DOCKER_TAG | sed 's/^.*-//g')
else
echo "Please set DOCKER_TAG environment variable"
exit 1
fi


# do initial cleanup
rm -rf neon_install postgres_install.tar.gz neon_install.tar.gz .neon_current_version
mkdir neon_install

# retrieve binaries from docker image
echo "getting binaries from docker image"
docker pull --quiet neondatabase/neon:${DOCKER_TAG}
ID=$(docker create neondatabase/neon:${DOCKER_TAG})
docker cp ${ID}:/data/postgres_install.tar.gz .
tar -xzf postgres_install.tar.gz -C neon_install
mkdir neon_install/bin/
docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
docker cp ${ID}:/usr/local/bin/pageserver_binutils neon_install/bin/
docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
docker cp ${ID}:/usr/local/v14/bin/ neon_install/v14/bin/
docker cp ${ID}:/usr/local/v15/bin/ neon_install/v15/bin/
docker cp ${ID}:/usr/local/v14/lib/ neon_install/v14/lib/
docker cp ${ID}:/usr/local/v15/lib/ neon_install/v15/lib/
docker rm -vf ${ID}

# store version to file (for ansible playbooks) and create binaries tarball
echo ${VERSION} > neon_install/.neon_current_version
echo ${VERSION} > .neon_current_version
tar -czf neon_install.tar.gz -C neon_install .

# do final cleanup
rm -rf neon_install postgres_install.tar.gz
31
.github/ansible/neon-stress.hosts.yaml
vendored
Normal file
31
.github/ansible/neon-stress.hosts.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
storage:
|
||||||
|
vars:
|
||||||
|
bucket_name: neon-storage-ireland
|
||||||
|
bucket_region: eu-west-1
|
||||||
|
console_mgmt_base_url: http://neon-stress-console.local
|
||||||
|
etcd_endpoints: neon-stress-etcd.local:2379
|
||||||
|
safekeeper_enable_s3_offload: 'false'
|
||||||
|
pageserver_config_stub:
|
||||||
|
pg_distrib_dir: /usr/local
|
||||||
|
remote_storage:
|
||||||
|
bucket_name: "{{ bucket_name }}"
|
||||||
|
bucket_region: "{{ bucket_region }}"
|
||||||
|
prefix_in_bucket: "{{ inventory_hostname }}"
|
||||||
|
safekeeper_s3_prefix: neon-stress/wal
|
||||||
|
hostname_suffix: ".local"
|
||||||
|
remote_user: admin
|
||||||
|
children:
|
||||||
|
pageservers:
|
||||||
|
hosts:
|
||||||
|
neon-stress-ps-1:
|
||||||
|
console_region_id: aws-eu-west-1
|
||||||
|
neon-stress-ps-2:
|
||||||
|
console_region_id: aws-eu-west-1
|
||||||
|
safekeepers:
|
||||||
|
hosts:
|
||||||
|
neon-stress-sk-1:
|
||||||
|
console_region_id: aws-eu-west-1
|
||||||
|
neon-stress-sk-2:
|
||||||
|
console_region_id: aws-eu-west-1
|
||||||
|
neon-stress-sk-3:
|
||||||
|
console_region_id: aws-eu-west-1
|
||||||
35
.github/ansible/prod.ap-southeast-1.hosts.yaml
vendored
Normal file
35
.github/ansible/prod.ap-southeast-1.hosts.yaml
vendored
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
storage:
|
||||||
|
vars:
|
||||||
|
bucket_name: neon-prod-storage-ap-southeast-1
|
||||||
|
bucket_region: ap-southeast-1
|
||||||
|
console_mgmt_base_url: http://console-release.local
|
||||||
|
etcd_endpoints: etcd-0.ap-southeast-1.aws.neon.tech:2379
|
||||||
|
pageserver_config_stub:
|
||||||
|
pg_distrib_dir: /usr/local
|
||||||
|
remote_storage:
|
||||||
|
bucket_name: "{{ bucket_name }}"
|
||||||
|
bucket_region: "{{ bucket_region }}"
|
||||||
|
prefix_in_bucket: "pageserver/v1"
|
||||||
|
safekeeper_s3_prefix: safekeeper/v1/wal
|
||||||
|
hostname_suffix: ""
|
||||||
|
remote_user: ssm-user
|
||||||
|
ansible_aws_ssm_region: ap-southeast-1
|
||||||
|
ansible_aws_ssm_bucket_name: neon-prod-storage-ap-southeast-1
|
||||||
|
console_region_id: aws-ap-southeast-1
|
||||||
|
|
||||||
|
children:
|
||||||
|
pageservers:
|
||||||
|
hosts:
|
||||||
|
pageserver-0.ap-southeast-1.aws.neon.tech:
|
||||||
|
ansible_host: i-064de8ea28bdb495b
|
||||||
|
pageserver-1.ap-southeast-1.aws.neon.tech:
|
||||||
|
ansible_host: i-0b180defcaeeb6b93
|
||||||
|
|
||||||
|
safekeepers:
|
||||||
|
hosts:
|
||||||
|
safekeeper-0.ap-southeast-1.aws.neon.tech:
|
||||||
|
ansible_host: i-0d6f1dc5161eef894
|
||||||
|
safekeeper-1.ap-southeast-1.aws.neon.tech:
|
||||||
|
ansible_host: i-0e338adda8eb2d19f
|
||||||
|
safekeeper-2.ap-southeast-1.aws.neon.tech:
|
||||||
|
ansible_host: i-04fb63634e4679eb9
|
||||||
35
.github/ansible/prod.eu-central-1.hosts.yaml
vendored
Normal file
35
.github/ansible/prod.eu-central-1.hosts.yaml
vendored
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
storage:
|
||||||
|
vars:
|
||||||
|
bucket_name: neon-prod-storage-eu-central-1
|
||||||
|
bucket_region: eu-central-1
|
||||||
|
console_mgmt_base_url: http://console-release.local
|
||||||
|
etcd_endpoints: etcd-0.eu-central-1.aws.neon.tech:2379
|
||||||
|
pageserver_config_stub:
|
||||||
|
pg_distrib_dir: /usr/local
|
||||||
|
remote_storage:
|
||||||
|
bucket_name: "{{ bucket_name }}"
|
||||||
|
bucket_region: "{{ bucket_region }}"
|
||||||
|
prefix_in_bucket: "pageserver/v1"
|
||||||
|
safekeeper_s3_prefix: safekeeper/v1/wal
|
||||||
|
hostname_suffix: ""
|
||||||
|
remote_user: ssm-user
|
||||||
|
ansible_aws_ssm_region: eu-central-1
|
||||||
|
ansible_aws_ssm_bucket_name: neon-prod-storage-eu-central-1
|
||||||
|
console_region_id: aws-eu-central-1
|
||||||
|
|
||||||
|
children:
|
||||||
|
pageservers:
|
||||||
|
hosts:
|
||||||
|
pageserver-0.eu-central-1.aws.neon.tech:
|
||||||
|
ansible_host: i-0cd8d316ecbb715be
|
||||||
|
pageserver-1.eu-central-1.aws.neon.tech:
|
||||||
|
ansible_host: i-090044ed3d383fef0
|
||||||
|
|
||||||
|
safekeepers:
|
||||||
|
hosts:
|
||||||
|
safekeeper-0.eu-central-1.aws.neon.tech:
|
||||||
|
ansible_host: i-0b238612d2318a050
|
||||||
|
safekeeper-1.eu-central-1.aws.neon.tech:
|
||||||
|
ansible_host: i-07b9c45e5c2637cd4
|
||||||
|
safekeeper-2.eu-central-1.aws.neon.tech:
|
||||||
|
ansible_host: i-020257302c3c93d88
|
||||||
36
.github/ansible/prod.us-east-2.hosts.yaml
vendored
Normal file
36
.github/ansible/prod.us-east-2.hosts.yaml
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
storage:
|
||||||
|
vars:
|
||||||
|
bucket_name: neon-prod-storage-us-east-2
|
||||||
|
bucket_region: us-east-2
|
||||||
|
console_mgmt_base_url: http://console-release.local
|
||||||
|
etcd_endpoints: etcd-0.us-east-2.aws.neon.tech:2379
|
||||||
|
pageserver_config_stub:
|
||||||
|
pg_distrib_dir: /usr/local
|
||||||
|
remote_storage:
|
||||||
|
bucket_name: "{{ bucket_name }}"
|
||||||
|
bucket_region: "{{ bucket_region }}"
|
||||||
|
prefix_in_bucket: "pageserver/v1"
|
||||||
|
safekeeper_s3_prefix: safekeeper/v1/wal
|
||||||
|
hostname_suffix: ""
|
||||||
|
remote_user: ssm-user
|
||||||
|
ansible_aws_ssm_region: us-east-2
|
||||||
|
ansible_aws_ssm_bucket_name: neon-prod-storage-us-east-2
|
||||||
|
console_region_id: aws-us-east-2
|
||||||
|
|
||||||
|
children:
|
||||||
|
pageservers:
|
||||||
|
hosts:
|
||||||
|
pageserver-0.us-east-2.aws.neon.tech:
|
||||||
|
ansible_host: i-062227ba7f119eb8c
|
||||||
|
pageserver-1.us-east-2.aws.neon.tech:
|
||||||
|
ansible_host: i-0b3ec0afab5968938
|
||||||
|
|
||||||
|
safekeepers:
|
||||||
|
hosts:
|
||||||
|
safekeeper-0.us-east-2.aws.neon.tech:
|
||||||
|
ansible_host: i-0e94224750c57d346
|
||||||
|
safekeeper-1.us-east-2.aws.neon.tech:
|
||||||
|
ansible_host: i-06d113fb73bfddeb0
|
||||||
|
safekeeper-2.us-east-2.aws.neon.tech:
|
||||||
|
ansible_host: i-09f66c8e04afff2e8
|
||||||
|
|
||||||
37
.github/ansible/production.hosts.yaml
vendored
Normal file
37
.github/ansible/production.hosts.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
---
|
||||||
|
storage:
|
||||||
|
vars:
|
||||||
|
console_mgmt_base_url: http://console-release.local
|
||||||
|
bucket_name: zenith-storage-oregon
|
||||||
|
bucket_region: us-west-2
|
||||||
|
etcd_endpoints: zenith-1-etcd.local:2379
|
||||||
|
pageserver_config_stub:
|
||||||
|
pg_distrib_dir: /usr/local
|
||||||
|
remote_storage:
|
||||||
|
bucket_name: "{{ bucket_name }}"
|
||||||
|
bucket_region: "{{ bucket_region }}"
|
||||||
|
prefix_in_bucket: "{{ inventory_hostname }}"
|
||||||
|
safekeeper_s3_prefix: prod-1/wal
|
||||||
|
hostname_suffix: ".local"
|
||||||
|
remote_user: admin
|
||||||
|
|
||||||
|
children:
|
||||||
|
pageservers:
|
||||||
|
hosts:
|
||||||
|
zenith-1-ps-2:
|
||||||
|
console_region_id: aws-us-west-2
|
||||||
|
zenith-1-ps-3:
|
||||||
|
console_region_id: aws-us-west-2
|
||||||
|
zenith-1-ps-4:
|
||||||
|
console_region_id: aws-us-west-2
|
||||||
|
zenith-1-ps-5:
|
||||||
|
console_region_id: aws-us-west-2
|
||||||
|
|
||||||
|
safekeepers:
|
||||||
|
hosts:
|
||||||
|
zenith-1-sk-1:
|
||||||
|
console_region_id: aws-us-west-2
|
||||||
|
zenith-1-sk-2:
|
||||||
|
console_region_id: aws-us-west-2
|
||||||
|
zenith-1-sk-3:
|
||||||
|
console_region_id: aws-us-west-2
|
||||||
@@ -12,18 +12,19 @@ cat <<EOF | tee /tmp/payload
|
|||||||
"version": 1,
|
"version": 1,
|
||||||
"host": "${HOST}",
|
"host": "${HOST}",
|
||||||
"port": 6400,
|
"port": 6400,
|
||||||
"region_id": {{ console_region_id }},
|
"region_id": "{{ console_region_id }}",
|
||||||
"instance_id": "${INSTANCE_ID}",
|
"instance_id": "${INSTANCE_ID}",
|
||||||
"http_host": "${HOST}",
|
"http_host": "${HOST}",
|
||||||
"http_port": 9898
|
"http_port": 9898,
|
||||||
|
"active": false
|
||||||
}
|
}
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# check if pageserver already registered or not
|
# check if pageserver already registered or not
|
||||||
if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/pageservers/${INSTANCE_ID} -o /dev/null; then
|
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/${INSTANCE_ID} -o /dev/null; then
|
||||||
|
|
||||||
# not registered, so register it now
|
# not registered, so register it now
|
||||||
ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/pageservers -d@/tmp/payload | jq -r '.ID')
|
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers -d@/tmp/payload | jq -r '.id')
|
||||||
|
|
||||||
# init pageserver
|
# init pageserver
|
||||||
sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data
|
sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data
|
||||||
31
.github/ansible/scripts/init_safekeeper.sh
vendored
Normal file
31
.github/ansible/scripts/init_safekeeper.sh
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# fetch params from meta-data service
|
||||||
|
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
||||||
|
AZ_ID=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)
|
||||||
|
|
||||||
|
# store fqdn hostname in var
|
||||||
|
HOST=$(hostname -f)
|
||||||
|
|
||||||
|
|
||||||
|
cat <<EOF | tee /tmp/payload
|
||||||
|
{
|
||||||
|
"version": 1,
|
||||||
|
"host": "${HOST}",
|
||||||
|
"port": 6500,
|
||||||
|
"http_port": 7676,
|
||||||
|
"region_id": "{{ console_region_id }}",
|
||||||
|
"instance_id": "${INSTANCE_ID}",
|
||||||
|
"availability_zone_id": "${AZ_ID}",
|
||||||
|
"active": false
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# check if safekeeper already registered or not
|
||||||
|
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/${INSTANCE_ID} -o /dev/null; then
|
||||||
|
|
||||||
|
# not registered, so register it now
|
||||||
|
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers -d@/tmp/payload | jq -r '.id')
|
||||||
|
# init safekeeper
|
||||||
|
sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
|
||||||
|
fi
|
||||||
2
.github/ansible/ssm_config
vendored
Normal file
2
.github/ansible/ssm_config
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
ansible_connection: aws_ssm
|
||||||
|
ansible_python_interpreter: /usr/bin/python3
|
||||||
33
.github/ansible/staging.eu-west-1.hosts.yaml
vendored
Normal file
33
.github/ansible/staging.eu-west-1.hosts.yaml
vendored
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
storage:
|
||||||
|
vars:
|
||||||
|
bucket_name: neon-dev-storage-eu-west-1
|
||||||
|
bucket_region: eu-west-1
|
||||||
|
console_mgmt_base_url: http://console-staging.local
|
||||||
|
etcd_endpoints: etcd-0.eu-west-1.aws.neon.build:2379
|
||||||
|
pageserver_config_stub:
|
||||||
|
pg_distrib_dir: /usr/local
|
||||||
|
remote_storage:
|
||||||
|
bucket_name: "{{ bucket_name }}"
|
||||||
|
bucket_region: "{{ bucket_region }}"
|
||||||
|
prefix_in_bucket: "pageserver/v1"
|
||||||
|
safekeeper_s3_prefix: safekeeper/v1/wal
|
||||||
|
hostname_suffix: ""
|
||||||
|
remote_user: ssm-user
|
||||||
|
ansible_aws_ssm_region: eu-west-1
|
||||||
|
ansible_aws_ssm_bucket_name: neon-dev-storage-eu-west-1
|
||||||
|
console_region_id: aws-eu-west-1
|
||||||
|
|
||||||
|
children:
|
||||||
|
pageservers:
|
||||||
|
hosts:
|
||||||
|
pageserver-0.eu-west-1.aws.neon.build:
|
||||||
|
ansible_host: i-01d496c5041c7f34c
|
||||||
|
|
||||||
|
safekeepers:
|
||||||
|
hosts:
|
||||||
|
safekeeper-0.eu-west-1.aws.neon.build:
|
||||||
|
ansible_host: i-05226ef85722831bf
|
||||||
|
safekeeper-1.eu-west-1.aws.neon.build:
|
||||||
|
ansible_host: i-06969ee1bf2958bfc
|
||||||
|
safekeeper-2.eu-west-1.aws.neon.build:
|
||||||
|
ansible_host: i-087892e9625984a0b
|
||||||
34
.github/ansible/staging.hosts.yaml
vendored
Normal file
34
.github/ansible/staging.hosts.yaml
vendored
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
storage:
|
||||||
|
vars:
|
||||||
|
bucket_name: zenith-staging-storage-us-east-1
|
||||||
|
bucket_region: us-east-1
|
||||||
|
console_mgmt_base_url: http://console-staging.local
|
||||||
|
etcd_endpoints: etcd-0.us-east-2.aws.neon.build:2379
|
||||||
|
pageserver_config_stub:
|
||||||
|
pg_distrib_dir: /usr/local
|
||||||
|
remote_storage:
|
||||||
|
bucket_name: "{{ bucket_name }}"
|
||||||
|
bucket_region: "{{ bucket_region }}"
|
||||||
|
prefix_in_bucket: "{{ inventory_hostname }}"
|
||||||
|
safekeeper_s3_prefix: us-stage/wal
|
||||||
|
hostname_suffix: ".local"
|
||||||
|
remote_user: admin
|
||||||
|
|
||||||
|
children:
|
||||||
|
pageservers:
|
||||||
|
hosts:
|
||||||
|
zenith-us-stage-ps-2:
|
||||||
|
console_region_id: aws-us-east-1
|
||||||
|
zenith-us-stage-ps-3:
|
||||||
|
console_region_id: aws-us-east-1
|
||||||
|
zenith-us-stage-ps-4:
|
||||||
|
console_region_id: aws-us-east-1
|
||||||
|
|
||||||
|
safekeepers:
|
||||||
|
hosts:
|
||||||
|
zenith-us-stage-sk-4:
|
||||||
|
console_region_id: aws-us-east-1
|
||||||
|
zenith-us-stage-sk-5:
|
||||||
|
console_region_id: aws-us-east-1
|
||||||
|
zenith-us-stage-sk-6:
|
||||||
|
console_region_id: aws-us-east-1
|
||||||
35
.github/ansible/staging.us-east-2.hosts.yaml
vendored
Normal file
35
.github/ansible/staging.us-east-2.hosts.yaml
vendored
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
storage:
|
||||||
|
vars:
|
||||||
|
bucket_name: neon-staging-storage-us-east-2
|
||||||
|
bucket_region: us-east-2
|
||||||
|
console_mgmt_base_url: http://console-staging.local
|
||||||
|
etcd_endpoints: etcd-0.us-east-2.aws.neon.build:2379
|
||||||
|
pageserver_config_stub:
|
||||||
|
pg_distrib_dir: /usr/local
|
||||||
|
remote_storage:
|
||||||
|
bucket_name: "{{ bucket_name }}"
|
||||||
|
bucket_region: "{{ bucket_region }}"
|
||||||
|
prefix_in_bucket: "pageserver/v1"
|
||||||
|
safekeeper_s3_prefix: safekeeper/v1/wal
|
||||||
|
hostname_suffix: ""
|
||||||
|
remote_user: ssm-user
|
||||||
|
ansible_aws_ssm_region: us-east-2
|
||||||
|
ansible_aws_ssm_bucket_name: neon-staging-storage-us-east-2
|
||||||
|
console_region_id: aws-us-east-2
|
||||||
|
|
||||||
|
children:
|
||||||
|
pageservers:
|
||||||
|
hosts:
|
||||||
|
pageserver-0.us-east-2.aws.neon.build:
|
||||||
|
ansible_host: i-0c3e70929edb5d691
|
||||||
|
pageserver-1.us-east-2.aws.neon.build:
|
||||||
|
ansible_host: i-0565a8b4008aa3f40
|
||||||
|
|
||||||
|
safekeepers:
|
||||||
|
hosts:
|
||||||
|
safekeeper-0.us-east-2.aws.neon.build:
|
||||||
|
ansible_host: i-027662bd552bf5db0
|
||||||
|
safekeeper-1.us-east-2.aws.neon.build:
|
||||||
|
ansible_host: i-0171efc3604a7b907
|
||||||
|
safekeeper-2.us-east-2.aws.neon.build:
|
||||||
|
ansible_host: i-0de0b03a51676a6ce
|
||||||
@@ -1,11 +1,11 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=Zenith pageserver
|
Description=Neon pageserver
|
||||||
After=network.target auditd.service
|
After=network.target auditd.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=simple
|
Type=simple
|
||||||
User=pageserver
|
User=pageserver
|
||||||
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/lib
|
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/v14/lib
|
||||||
ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -c "broker_endpoints=['{{ etcd_endpoints }}']" -D /storage/pageserver/data
|
ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -c "broker_endpoints=['{{ etcd_endpoints }}']" -D /storage/pageserver/data
|
||||||
ExecReload=/bin/kill -HUP $MAINPID
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
KillMode=mixed
|
KillMode=mixed
|
||||||
18
.github/ansible/systemd/safekeeper.service
vendored
Normal file
18
.github/ansible/systemd/safekeeper.service
vendored
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Neon safekeeper
|
||||||
|
After=network.target auditd.service
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
User=safekeeper
|
||||||
|
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/v14/lib
|
||||||
|
ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}{{ hostname_suffix }}:6500 --listen-http {{ inventory_hostname }}{{ hostname_suffix }}:7676 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ safekeeper_s3_prefix }}"}'
|
||||||
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
|
KillMode=mixed
|
||||||
|
KillSignal=SIGINT
|
||||||
|
Restart=on-failure
|
||||||
|
TimeoutSec=10
|
||||||
|
LimitNOFILE=30000000
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
1
.github/ansible/templates/pageserver.toml.j2
vendored
Normal file
1
.github/ansible/templates/pageserver.toml.j2
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{{ pageserver_config | sivel.toiletwater.to_toml }}
|
||||||
31
.github/helm-values/dev-eu-west-1-zeta.neon-proxy-scram.yaml
vendored
Normal file
31
.github/helm-values/dev-eu-west-1-zeta.neon-proxy-scram.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-staging.local/management/api/v2"
|
||||||
|
domain: "*.eu-west-1.aws.neon.build"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram
|
||||||
|
zenith_env: dev
|
||||||
|
zenith_region: eu-west-1
|
||||||
|
zenith_region_slug: eu-west-1
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: eu-west-1.aws.neon.build
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
31
.github/helm-values/dev-us-east-2-beta.neon-proxy-scram.yaml
vendored
Normal file
31
.github/helm-values/dev-us-east-2-beta.neon-proxy-scram.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-staging.local/management/api/v2"
|
||||||
|
domain: "*.us-east-2.aws.neon.build"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram
|
||||||
|
zenith_env: dev
|
||||||
|
zenith_region: us-east-2
|
||||||
|
zenith_region_slug: us-east-2
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.build
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
fullnameOverride: "neon-stress-proxy"
|
fullnameOverride: "neon-stress-proxy"
|
||||||
|
|
||||||
settings:
|
settings:
|
||||||
|
authBackend: "link"
|
||||||
authEndpoint: "https://console.dev.neon.tech/authenticate_proxy_request/"
|
authEndpoint: "https://console.dev.neon.tech/authenticate_proxy_request/"
|
||||||
uri: "https://console.dev.neon.tech/psql_session/"
|
uri: "https://console.dev.neon.tech/psql_session/"
|
||||||
|
|
||||||
31
.github/helm-values/prod-ap-southeast-1-epsilon.neon-proxy-scram.yaml
vendored
Normal file
31
.github/helm-values/prod-ap-southeast-1-epsilon.neon-proxy-scram.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-release.local/management/api/v2"
|
||||||
|
domain: "*.ap-southeast-1.aws.neon.tech"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram
|
||||||
|
zenith_env: prod
|
||||||
|
zenith_region: ap-southeast-1
|
||||||
|
zenith_region_slug: ap-southeast-1
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: ap-southeast-1.aws.neon.tech
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
.github/helm-values/prod-eu-central-1-gamma.neon-proxy-scram.yaml  (vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.eu-central-1.aws.neon.tech"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: prod
  zenith_region: eu-central-1
  zenith_region_slug: eu-central-1

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: eu-central-1.aws.neon.tech

#metrics:
#  enabled: true
#  serviceMonitor:
#    enabled: true
#    selector:
#      release: kube-prometheus-stack
.github/helm-values/prod-us-east-2-delta.neon-proxy-scram.yaml  (vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.us-east-2.aws.neon.tech"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: prod
  zenith_region: us-east-2
  zenith_region_slug: us-east-2

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.tech

#metrics:
#  enabled: true
#  serviceMonitor:
#    enabled: true
#    selector:
#      release: kube-prometheus-stack
@@ -1,4 +1,5 @@
 settings:
+  authBackend: "link"
   authEndpoint: "https://console.neon.tech/authenticate_proxy_request/"
   uri: "https://console.neon.tech/psql_session/"
 
@@ -5,6 +5,7 @@ image:
   repository: neondatabase/neon
 
 settings:
+  authBackend: "link"
   authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
   uri: "https://console.stage.neon.tech/psql_session/"
 
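For context, the `exposedService.annotations` block in the values files above is what typically ends up on the proxy's LoadBalancer Service once the chart is rendered. A minimal sketch of such a manifest, assuming the chart copies the annotations through unchanged (the Service name and port here are illustrative, not taken from the chart):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: neon-proxy-scram-exposed          # hypothetical name, chart-dependent
  annotations:
    # AWS Load Balancer Controller: provision an internet-facing NLB with IP targets
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    # external-dns: publish a DNS record for the load balancer
    external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.tech
spec:
  type: LoadBalancer
  ports:
    - port: 5432                          # illustrative; the real port comes from the chart values
```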
.github/workflows/benchmarking.yml  (vendored, 232 changed lines)
@@ -1,4 +1,4 @@
-name: benchmarking
+name: Benchmarking
 
 on:
   # uncomment to run on push for debugging your PR
@@ -11,9 +11,29 @@ on:
     # │ │ ┌───────────── day of the month (1 - 31)
     # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
     # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
-    - cron: '36 7 * * *' # run once a day, timezone is utc
+    - cron: '0 3 * * *' # run once a day, timezone is utc
 
   workflow_dispatch: # adds ability to run this manually
+    inputs:
+      environment:
+        description: 'Environment to run remote tests on (dev or staging)'
+        required: false
+      region_id:
+        description: 'Use a particular region. If not set the default region will be used'
+        required: false
+      save_perf_report:
+        type: boolean
+        description: 'Publish perf report or not. If not set, the report is published only for the main branch'
+        required: false
+
+defaults:
+  run:
+    shell: bash -euxo pipefail {0}
+
+concurrency:
+  # Allow only one workflow per any non-`main` branch.
+  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
+  cancel-in-progress: true
 
 jobs:
   bench:
@@ -26,11 +46,12 @@ jobs:
     runs-on: [self-hosted, zenith-benchmarker]
 
     env:
-      POSTGRES_DISTRIB_DIR: "/usr/pgsql-13"
+      POSTGRES_DISTRIB_DIR: /usr/pgsql
+      DEFAULT_PG_VERSION: 14
 
     steps:
     - name: Checkout zenith repo
-      uses: actions/checkout@v2
+      uses: actions/checkout@v3
 
     # actions/setup-python@v2 is not working correctly on self-hosted runners
     # see https://github.com/actions/setup-python/issues/162
@@ -51,22 +72,14 @@ jobs:
         echo Poetry
         poetry --version
         echo Pgbench
-        $POSTGRES_DISTRIB_DIR/bin/pgbench --version
+        ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
 
-    # FIXME cluster setup is skipped due to various changes in console API
-    # for now pre created cluster is used. When API gain some stability
-    # after massive changes dynamic cluster setup will be revived.
-    # So use pre created cluster. It needs to be started manually, but stop is automatic after 5 minutes of inactivity
-    - name: Setup cluster
-      env:
-        BENCHMARK_CONNSTR: "${{ secrets.BENCHMARK_STAGING_CONNSTR }}"
-      shell: bash
-      run: |
-        set -e
-
-        echo "Starting cluster"
-        # wake up the cluster
-        $POSTGRES_DISTRIB_DIR/bin/psql $BENCHMARK_CONNSTR -c "SELECT 1"
+    - name: Create Neon Project
+      id: create-neon-project
+      uses: ./.github/actions/neon-project-create
+      with:
+        environment: ${{ github.event.inputs.environment || 'staging' }}
+        api_key: ${{ ( github.event.inputs.environment || 'staging' ) == 'staging' && secrets.NEON_STAGING_API_KEY || secrets.NEON_CAPTEST_API_KEY }}
 
     - name: Run benchmark
       # pgbench is installed system wide from official repo
@@ -88,15 +101,17 @@ jobs:
        # Plus time needed to initialize the test databases.
         TEST_PG_BENCH_DURATIONS_MATRIX: "300"
         TEST_PG_BENCH_SCALES_MATRIX: "10,100"
-        PLATFORM: "zenith-staging"
-        BENCHMARK_CONNSTR: "${{ secrets.BENCHMARK_STAGING_CONNSTR }}"
+        PLATFORM: "neon-staging"
+        BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
         REMOTE_ENV: "1" # indicate to test harness that we do not have zenith binaries locally
       run: |
         # just to be sure that no data was cached on self hosted runner
         # since it might generate duplicates when calling ingest_perf_test_result.py
         rm -rf perf-report-staging
         mkdir -p perf-report-staging
-        ./scripts/pytest test_runner/performance/ -v -m "remote_cluster" --skip-interfering-proc-check --out-dir perf-report-staging
+        # Set --sparse-ordering option of pytest-order plugin to ensure tests are running in order of appears in the file,
+        # it's important for test_perf_pgbench.py::test_pgbench_remote_* tests
+        ./scripts/pytest test_runner/performance/ -v -m "remote_cluster" --sparse-ordering --out-dir perf-report-staging --timeout 5400
 
     - name: Submit result
       env:
@@ -104,3 +119,176 @@ jobs:
         PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
       run: |
         REPORT_FROM=$(realpath perf-report-staging) REPORT_TO=staging scripts/generate_and_push_perf_report.sh
+
+    - name: Delete Neon Project
+      if: ${{ always() }}
+      uses: ./.github/actions/neon-project-delete
+      with:
+        environment: staging
+        project_id: ${{ steps.create-neon-project.outputs.project_id }}
+        api_key: ${{ secrets.NEON_STAGING_API_KEY }}
+
+    - name: Post to a Slack channel
+      if: ${{ github.event.schedule && failure() }}
+      uses: slackapi/slack-github-action@v1
+      with:
+        channel-id: "C033QLM5P7D" # dev-staging-stream
+        slack-message: "Periodic perf testing: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+      env:
+        SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
+
+  pgbench-compare:
+    strategy:
+      fail-fast: false
+      matrix:
+        # neon-captest-new: Run pgbench in a freshly created project
+        # neon-captest-reuse: Same, but reusing existing project
+        # neon-captest-prefetch: Same, with prefetching enabled (new project)
+        # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
+        # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
+        platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch, rds-postgres ]
+        db_size: [ 10gb ]
+        include:
+          - platform: neon-captest-new
+            db_size: 50gb
+          - platform: neon-captest-prefetch
+            db_size: 50gb
+          - platform: rds-aurora
+            db_size: 50gb
+
+    env:
+      TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
+      TEST_PG_BENCH_SCALES_MATRIX: ${{ matrix.db_size }}
+      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+      DEFAULT_PG_VERSION: 14
+      TEST_OUTPUT: /tmp/test_output
+      BUILD_TYPE: remote
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      PLATFORM: ${{ matrix.platform }}
+
+    runs-on: [ self-hosted, dev, x64 ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rustlegacy:pinned
+      options: --init
+
+    timeout-minutes: 360 # 6h
+
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Download Neon artifact
+      uses: ./.github/actions/download
+      with:
+        name: neon-${{ runner.os }}-release-artifact
+        path: /tmp/neon/
+        prefix: latest
+
+    - name: Add Postgres binaries to PATH
+      run: |
+        ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
+        echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
+
+    - name: Create Neon Project
+      if: contains(fromJson('["neon-captest-new", "neon-captest-prefetch"]'), matrix.platform)
+      id: create-neon-project
+      uses: ./.github/actions/neon-project-create
+      with:
+        environment: ${{ github.event.inputs.environment || 'dev' }}
+        api_key: ${{ ( github.event.inputs.environment || 'dev' ) == 'staging' && secrets.NEON_STAGING_API_KEY || secrets.NEON_CAPTEST_API_KEY }}
+
+    - name: Set up Connection String
+      id: set-up-connstr
+      run: |
+        case "${PLATFORM}" in
+          neon-captest-reuse)
+            CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
+            ;;
+          neon-captest-new | neon-captest-prefetch)
+            CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
+            ;;
+          rds-aurora)
+            CONNSTR=${{ secrets.BENCHMARK_RDS_CONNSTR }}
+            ;;
+          rds-postgres)
+            CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
+            ;;
+          *)
+            echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'neon-captest-new', 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
+            exit 1
+            ;;
+        esac
+
+        echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
+
+        psql ${CONNSTR} -c "SELECT version();"
+
+    - name: Set database options
+      if: matrix.platform == 'neon-captest-prefetch'
+      run: |
+        psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET enable_seqscan_prefetch=on"
+        psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET seqscan_prefetch_buffers=10"
+      env:
+        BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+
+    - name: Benchmark init
+      uses: ./.github/actions/run-python-test-set
+      with:
+        build_type: ${{ env.BUILD_TYPE }}
+        test_selection: performance
+        run_in_parallel: false
+        save_perf_report: ${{ env.SAVE_PERF_REPORT }}
+        extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
+      env:
+        BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+        VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
+        PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
+
+    - name: Benchmark simple-update
+      uses: ./.github/actions/run-python-test-set
+      with:
+        build_type: ${{ env.BUILD_TYPE }}
+        test_selection: performance
+        run_in_parallel: false
+        save_perf_report: ${{ env.SAVE_PERF_REPORT }}
+        extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
+      env:
+        BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+        VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
+        PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
+
+    - name: Benchmark select-only
+      uses: ./.github/actions/run-python-test-set
+      with:
+        build_type: ${{ env.BUILD_TYPE }}
+        test_selection: performance
+        run_in_parallel: false
+        save_perf_report: ${{ env.SAVE_PERF_REPORT }}
+        extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
+      env:
+        BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+        VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
+        PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
+
+    - name: Create Allure report
+      if: success() || failure()
+      uses: ./.github/actions/allure-report
+      with:
+        action: generate
+        build_type: ${{ env.BUILD_TYPE }}
+
+    - name: Delete Neon Project
+      if: ${{ steps.create-neon-project.outputs.project_id && always() }}
+      uses: ./.github/actions/neon-project-delete
+      with:
+        environment: dev
+        project_id: ${{ steps.create-neon-project.outputs.project_id }}
+        api_key: ${{ secrets.NEON_CAPTEST_API_KEY }}
+
+    - name: Post to a Slack channel
+      if: ${{ github.event.schedule && failure() }}
+      uses: slackapi/slack-github-action@v1
+      with:
+        channel-id: "C033QLM5P7D" # dev-staging-stream
+        slack-message: "Periodic perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+      env:
+        SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
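The concurrency stanza added here (and to the other workflows in this change) is what serializes runs per branch while never cancelling runs on main. A minimal sketch of the same pattern in isolation, with comments spelling out how the group key evaluates (the workflow name and job are illustrative):

```yaml
name: example              # illustrative workflow name
on: [push]

concurrency:
  # On a feature branch this resolves to "example-refs/heads/<branch>-anysha",
  # so a newer push cancels the older in-flight run for that branch.
  # On main it resolves to "example-refs/heads/main-<sha>", a unique group per commit,
  # so runs for main are never cancelled by later pushes.
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
  cancel-in-progress: true

jobs:
  noop:
    runs-on: ubuntu-latest
    steps:
      - run: echo "ok"
```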
.github/workflows/build_and_test.yml  (vendored, 1004 changed lines; file diff suppressed because it is too large)
.github/workflows/codestyle.yml  (vendored, 121 changed lines)
@@ -8,43 +8,39 @@ on:
 
 defaults:
   run:
-    shell: bash -ex {0}
+    shell: bash -euxo pipefail {0}
 
 concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
+  # Allow only one workflow per any non-`main` branch.
+  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
   cancel-in-progress: true
 
 env:
   RUST_BACKTRACE: 1
+  COPT: '-Werror'
 
 jobs:
   check-codestyle-rust:
     strategy:
       fail-fast: false
       matrix:
-        # If we want to duplicate this job for different
-        # Rust toolchains (e.g. nightly or 1.37.0), add them here.
-        rust_toolchain: [1.58]
+        # XXX: both OSes have rustup
+        # * https://github.com/actions/runner-images/blob/main/images/macos/macos-12-Readme.md#rust-tools
+        # * https://github.com/actions/runner-images/blob/main/images/linux/Ubuntu2204-Readme.md#rust-tools
+        # this is all we need to install our toolchain later via rust-toolchain.toml
+        # so don't install any toolchain explicitly.
         os: [ubuntu-latest, macos-latest]
-    timeout-minutes: 50
-    name: run regression test suite
+    timeout-minutes: 90
+    name: check codestyle rust and postgres
     runs-on: ${{ matrix.os }}
 
     steps:
     - name: Checkout
-      uses: actions/checkout@v2
+      uses: actions/checkout@v3
       with:
         submodules: true
        fetch-depth: 2
 
-    - name: Install rust toolchain ${{ matrix.rust_toolchain }}
-      uses: actions-rs/toolchain@v1
-      with:
-        profile: minimal
-        toolchain: ${{ matrix.rust_toolchain }}
-        components: rustfmt, clippy
-        override: true
-
     - name: Check formatting
       run: cargo fmt --all -- --check
 
@@ -58,17 +54,29 @@ jobs:
       if: matrix.os == 'macos-latest'
       run: brew install flex bison openssl
 
-    - name: Set pg revision for caching
-      id: pg_ver
-      run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres)
+    - name: Set pg 14 revision for caching
+      id: pg_v14_rev
+      run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
+      shell: bash -euxo pipefail {0}
 
-    - name: Cache postgres build
-      id: cache_pg
-      uses: actions/cache@v2
+    - name: Set pg 15 revision for caching
+      id: pg_v15_rev
+      run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
+      shell: bash -euxo pipefail {0}
+
+    - name: Cache postgres v14 build
+      id: cache_pg_14
+      uses: actions/cache@v3
       with:
-        path: |
-          tmp_install/
-        key: ${{ runner.os }}-pg-${{ steps.pg_ver.outputs.pg_rev }}
+        path: pg_install/v14
+        key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
+
+    - name: Cache postgres v15 build
+      id: cache_pg_15
+      uses: actions/cache@v3
+      with:
+        path: pg_install/v15
+        key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
 
     - name: Set extra env for macOS
       if: matrix.os == 'macos-latest'
@@ -76,36 +84,55 @@ jobs:
         echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
         echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
 
-    - name: Build postgres
-      if: steps.cache_pg.outputs.cache-hit != 'true'
-      run: make postgres
+    - name: Build postgres v14
+      if: steps.cache_pg_14.outputs.cache-hit != 'true'
+      run: make postgres-v14
+      shell: bash -euxo pipefail {0}
 
-    # Plain configure output can contain weird errors like 'error: C compiler cannot create executables'
-    # and the real cause will be inside config.log
-    - name: Print configure logs in case of failure
-      if: failure()
-      continue-on-error: true
-      run: |
-        echo '' && echo '=== config.log ===' && echo ''
-        cat tmp_install/build/config.log
-        echo '' && echo '=== configure.log ===' && echo ''
-        cat tmp_install/build/configure.log
+    - name: Build postgres v15
+      if: steps.cache_pg_15.outputs.cache-hit != 'true'
+      run: make postgres-v15
+      shell: bash -euxo pipefail {0}
+
+    - name: Build neon extensions
+      run: make neon-pg-ext
 
     - name: Cache cargo deps
       id: cache_cargo
-      uses: actions/cache@v2
+      uses: actions/cache@v3
       with:
         path: |
           ~/.cargo/registry
+          !~/.cargo/registry/src
           ~/.cargo/git
           target
-        key: ${{ runner.os }}-cargo-${{ hashFiles('./Cargo.lock') }}-rust-${{ matrix.rust_toolchain }}
+        key: v6-${{ runner.os }}-cargo-${{ hashFiles('./Cargo.lock') }}-rust
 
     - name: Run cargo clippy
       run: ./run_clippy.sh
 
     - name: Ensure all project builds
-      run: cargo build --all --all-targets
+      run: cargo build --locked --all --all-targets
+
+  check-rust-dependencies:
+    runs-on: [ self-hosted, dev, x64 ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
+      options: --init
+
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v3
+      with:
+        submodules: false
+        fetch-depth: 1
+
+    # https://github.com/facebookincubator/cargo-guppy/tree/bec4e0eb29dcd1faac70b1b5360267fc02bf830e/tools/cargo-hakari#2-keep-the-workspace-hack-up-to-date-in-ci
+    - name: Check every project module is covered by Hakari
+      run: |
+        cargo hakari generate --diff  # workspace-hack Cargo.toml is up-to-date
+        cargo hakari manage-deps --dry-run  # all workspace crates depend on workspace-hack
+      shell: bash -euxo pipefail {0}
 
   check-codestyle-python:
     runs-on: [ self-hosted, Linux, k8s-runner ]
@@ -126,8 +153,14 @@ jobs:
     - name: Install Python deps
       run: ./scripts/pysync
 
-    - name: Run yapf to ensure code format
-      run: poetry run yapf --recursive --diff .
+    - name: Run isort to ensure code format
+      run: poetry run isort --diff --check .
+
+    - name: Run black to ensure code format
+      run: poetry run black --diff --check .
+
+    - name: Run flake8 to ensure code format
+      run: poetry run flake8 .
 
     - name: Run mypy to check types
       run: poetry run mypy .
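The cache steps above key the Postgres build on the vendored submodule revision, so the cache is invalidated exactly when `vendor/postgres-v14`/`-v15` (or the Makefile) changes. A stripped-down sketch of the same pattern for a single version, assuming `make postgres-v14` is what populates `pg_install/v14` (the `v1-` prefix is just a manual knob for forcing a rebuild):

```yaml
    - name: Set pg 14 revision for caching
      id: pg_v14_rev
      # The submodule commit hash becomes part of the cache key.
      run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT

    - name: Cache postgres v14 build
      id: cache_pg_14
      uses: actions/cache@v3
      with:
        path: pg_install/v14
        key: v1-${{ runner.os }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

    # Rebuild only on a cache miss, i.e. when the submodule or Makefile changed.
    - name: Build postgres v14
      if: steps.cache_pg_14.outputs.cache-hit != 'true'
      run: make postgres-v14
```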
.github/workflows/notifications.yml  (vendored, deleted, 45 lines)
@@ -1,45 +0,0 @@
name: Send Notifications

on:
  push:
    branches: [ main ]

jobs:
  send-notifications:
    timeout-minutes: 30
    name: send commit notifications
    runs-on: ubuntu-latest

    steps:

    - name: Checkout
      uses: actions/checkout@v2
      with:
        submodules: true
        fetch-depth: 2

    - name: Form variables for notification message
      id: git_info_grab
      run: |
        git_stat=$(git show --stat=50)
        git_stat="${git_stat//'%'/'%25'}"
        git_stat="${git_stat//$'\n'/'%0A'}"
        git_stat="${git_stat//$'\r'/'%0D'}"
        git_stat="${git_stat// / }" # space -> 'Space En', as github tends to eat ordinary spaces
        echo "::set-output name=git_stat::$git_stat"
        echo "::set-output name=sha_short::$(git rev-parse --short HEAD)"
        echo "##[set-output name=git_branch;]$(echo ${GITHUB_REF#refs/heads/})"

    - name: Send notification
      uses: appleboy/telegram-action@master
      with:
        to: ${{ secrets.TELEGRAM_TO }}
        token: ${{ secrets.TELEGRAM_TOKEN }}
        format: markdown
        args: |
          *@${{ github.actor }} pushed to* [${{ github.repository }}:${{steps.git_info_grab.outputs.git_branch}}](github.com/${{ github.repository }}/commit/${{steps.git_info_grab.outputs.sha_short }})

          ```
          ${{ steps.git_info_grab.outputs.git_stat }}
          ```
.github/workflows/pg_clients.yml  (vendored, 49 changed lines)
@@ -13,13 +13,18 @@ on:
   workflow_dispatch:
 
 concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
+  # Allow only one workflow per any non-`main` branch.
+  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
   cancel-in-progress: true
 
 jobs:
   test-postgres-client-libs:
+    # TODO: switch to gen2 runner, requires docker
     runs-on: [ ubuntu-latest ]
 
+    env:
+      TEST_OUTPUT: /tmp/test_output
+
     steps:
     - name: Checkout
       uses: actions/checkout@v3
@@ -39,23 +44,26 @@ jobs:
        key: v1-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }}
 
    - name: Install Python deps
-      shell: bash -ex {0}
+      shell: bash -euxo pipefail {0}
       run: ./scripts/pysync
 
+    - name: Create Neon Project
+      id: create-neon-project
+      uses: ./.github/actions/neon-project-create
+      with:
+        environment: staging
+        api_key: ${{ secrets.NEON_STAGING_API_KEY }}
+
    - name: Run pytest
       env:
         REMOTE_ENV: 1
-        BENCHMARK_CONNSTR: "${{ secrets.BENCHMARK_STAGING_CONNSTR }}"
-        TEST_OUTPUT: /tmp/test_output
+        BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
         POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
-        # this variable will be embedded in perf test report
-        # and is needed to distinguish different environments
-        PLATFORM: github-actions-selfhosted
-      shell: bash -ex {0}
+      shell: bash -euxo pipefail {0}
       run: |
         # Test framework expects we have psql binary;
         # but since we don't really need it in this test, let's mock it
-        mkdir -p "$POSTGRES_DISTRIB_DIR/bin" && touch "$POSTGRES_DISTRIB_DIR/bin/psql";
+        mkdir -p "$POSTGRES_DISTRIB_DIR/v14/bin" && touch "$POSTGRES_DISTRIB_DIR/v14/bin/psql";
         ./scripts/pytest \
           --junitxml=$TEST_OUTPUT/junit.xml \
           --tb=short \
@@ -63,9 +71,26 @@ jobs:
           -m "remote_cluster" \
           -rA "test_runner/pg_clients"
 
+    - name: Delete Neon Project
+      if: ${{ always() }}
+      uses: ./.github/actions/neon-project-delete
+      with:
+        environment: staging
+        project_id: ${{ steps.create-neon-project.outputs.project_id }}
+        api_key: ${{ secrets.NEON_STAGING_API_KEY }}
+
+    # We use GitHub's action upload-artifact because `ubuntu-latest` doesn't have configured AWS CLI.
+    # It will be fixed after switching to gen2 runner
+    - name: Upload python test logs
+      if: always()
+      uses: actions/upload-artifact@v3
+      with:
+        retention-days: 7
+        name: python-test-pg_clients-${{ runner.os }}-stage-logs
+        path: ${{ env.TEST_OUTPUT }}
+
    - name: Post to a Slack channel
-      if: failure()
-      id: slack
+      if: ${{ github.event.schedule && failure() }}
       uses: slackapi/slack-github-action@v1
       with:
         channel-id: "C033QLM5P7D" # dev-staging-stream
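Several workflows in this change adopt the same create-project / run / always-delete skeleton around the repository's composite actions. A minimal sketch of that skeleton, with the middle step standing in for whatever actually uses the project (inputs and outputs follow the diffs above; the job name is illustrative):

```yaml
jobs:
  uses-a-throwaway-project:        # illustrative job name
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Create Neon Project
        id: create-neon-project
        uses: ./.github/actions/neon-project-create
        with:
          environment: staging
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}

      - name: Use the project
        run: psql "${{ steps.create-neon-project.outputs.dsn }}" -c "SELECT 1"

      # Runs even if the step above failed, so test projects don't leak.
      - name: Delete Neon Project
        if: ${{ always() }}
        uses: ./.github/actions/neon-project-delete
        with:
          environment: staging
          project_id: ${{ steps.create-neon-project.outputs.project_id }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
```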
.gitignore  (vendored, 5 changed lines)
@@ -1,6 +1,6 @@
+/pg_install
 /target
 /tmp_check
-/tmp_install
 /tmp_check_cli
 __pycache__/
 test_output/
@@ -15,3 +15,6 @@ test_output/
 
 *.key
 *.crt
+*.o
+*.so
+*.Po
.gitmodules  (vendored, 12 changed lines)
@@ -1,4 +1,8 @@
-[submodule "vendor/postgres"]
-	path = vendor/postgres
-	url = https://github.com/zenithdb/postgres
-	branch = main
+[submodule "vendor/postgres-v14"]
+	path = vendor/postgres-v14
+	url = https://github.com/neondatabase/postgres.git
+	branch = REL_14_STABLE_neon
+[submodule "vendor/postgres-v15"]
+	path = vendor/postgres-v15
+	url = https://github.com/neondatabase/postgres.git
+	branch = REL_15_STABLE_neon
.yapfignore  (deleted, 10 lines)
@@ -1,10 +0,0 @@
# This file is only read when `yapf` is run from this directory.
# Hence we only top-level directories here to avoid confusion.
# See source code for the exact file format: https://github.com/google/yapf/blob/c6077954245bc3add82dafd853a1c7305a6ebd20/yapf/yapflib/file_resources.py#L40-L43
vendor/
target/
tmp_install/
__pycache__/
test_output/
.neon/
.git/
CODEOWNERS  (new file, 11 lines)
@@ -0,0 +1,11 @@
/compute_tools/ @neondatabase/control-plane
/control_plane/ @neondatabase/compute @neondatabase/storage
/libs/pageserver_api/ @neondatabase/compute @neondatabase/storage
/libs/postgres_ffi/ @neondatabase/compute
/libs/remote_storage/ @neondatabase/storage
/libs/safekeeper_api/ @neondatabase/safekeepers
/pageserver/ @neondatabase/compute @neondatabase/storage
/pgxn/ @neondatabase/compute
/proxy/ @neondatabase/control-plane
/safekeeper/ @neondatabase/safekeepers
/vendor/ @neondatabase/compute
@@ -11,17 +11,15 @@
 
 ## Submitting changes
 
-1. Make a PR for every change.
-
-   Even seemingly trivial patches can break things in surprising ways.
-   Use of common sense is OK. If you're only fixing a typo in a comment,
-   it's probably fine to just push it. But if in doubt, open a PR.
-
-2. Get at least one +1 on your PR before you push.
+1. Get at least one +1 on your PR before you push.
 
    For simple patches, it will only take a minute for someone to review
    it.
 
+2. Don't force push small changes after making the PR ready for review.
+   Doing so will force readers to re-read your entire PR, which will delay
+   the review process.
+
 3. Always keep the CI green.
 
    Do not push, if the CI failed on your PR. Even if you think it's not
Cargo.lock  (generated, 1795 changed lines; file diff suppressed because it is too large)
Cargo.toml  (71 changed lines)
@@ -1,3 +1,14 @@
+# 'named-profiles' feature was stabilized in cargo 1.57. This line makes the
+# build work with older cargo versions.
+#
+# We have this because as of this writing, the latest cargo Debian package
+# that's available is 1.56. (Confusingly, the Debian package version number
+# is 0.57, whereas 'cargo --version' says 1.56.)
+#
+# See https://tracker.debian.org/pkg/cargo for the current status of the
+# package. When that gets updated, we can remove this.
+cargo-features = ["named-profiles"]
+
 [workspace]
 members = [
     "compute_tools",
@@ -6,7 +17,6 @@ members = [
     "proxy",
     "safekeeper",
     "workspace_hack",
-    "neon_local",
     "libs/*",
 ]
 
@@ -15,7 +25,64 @@ members = [
 # Besides, debug info should not affect the performance.
 debug = true
 
+# disable debug symbols for all packages except this one to decrease binaries size
+[profile.release.package."*"]
+debug = false
+
+[profile.release-line-debug]
+inherits = "release"
+debug = 1 # true = 2 = all symbols, 1 = line only
+[profile.release-line-debug-lto]
+inherits = "release"
+debug = 1 # true = 2 = all symbols, 1 = line only
+lto = true
+
+[profile.release-line-debug-size]
+inherits = "release"
+debug = 1 # true = 2 = all symbols, 1 = line only
+opt-level = "s"
+[profile.release-line-debug-zize]
+inherits = "release"
+debug = 1 # true = 2 = all symbols, 1 = line only
+opt-level = "z"
+[profile.release-line-debug-size-lto]
+inherits = "release"
+debug = 1 # true = 2 = all symbols, 1 = line only
+opt-level = "s"
+lto = true
+[profile.release-line-debug-zize-lto]
+inherits = "release"
+debug = 1 # true = 2 = all symbols, 1 = line only
+opt-level = "z"
+lto = true
+
+[profile.release-no-debug]
+inherits = "release"
+debug = false # true = 2 = all symbols, 1 = line only
+
+[profile.release-no-debug-size]
+inherits = "release"
+debug = false # true = 2 = all symbols, 1 = line only
+opt-level = "s"
+[profile.release-no-debug-zize]
+inherits = "release"
+debug = false # true = 2 = all symbols, 1 = line only
+opt-level = "z"
+
+[profile.release-no-debug-size-lto]
+inherits = "release"
+debug = false # true = 2 = all symbols, 1 = line only
+opt-level = "s"
+lto = true
+
+[profile.release-no-debug-zize-lto]
+inherits = "release"
+debug = false # true = 2 = all symbols, 1 = line only
+opt-level = "z"
+lto = true
+
 # This is only needed for proxy's tests.
 # TODO: we should probably fork `tokio-postgres-rustls` instead.
 [patch.crates-io]
-tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
+tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
Dockerfile  (85 changed lines)
@@ -1,33 +1,50 @@
+### Creates a storage Docker image with postgres, pageserver, safekeeper and proxy binaries.
+### The image itself is mainly used as a container for the binaries and for starting e2e tests with custom parameters.
+### By default, the binaries inside the image have some mock parameters and can start, but are not intended to be used
+### inside this image in the real deployments.
+ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
+ARG IMAGE=rust
+ARG TAG=pinned
+
 # Build Postgres
-FROM neondatabase/rust:1.58 AS pg-build
-WORKDIR /pg
+FROM $REPOSITORY/$IMAGE:$TAG AS pg-build
+WORKDIR /home/nonroot
 
-USER root
-COPY vendor/postgres vendor/postgres
-COPY Makefile Makefile
+COPY --chown=nonroot vendor/postgres-v14 vendor/postgres-v14
+COPY --chown=nonroot vendor/postgres-v15 vendor/postgres-v15
+COPY --chown=nonroot pgxn pgxn
+COPY --chown=nonroot Makefile Makefile
+COPY --chown=nonroot scripts/ninstall.sh scripts/ninstall.sh
 
 ENV BUILD_TYPE release
 RUN set -e \
-    && mold -run make -j $(nproc) -s postgres \
-    && rm -rf tmp_install/build \
-    && tar -C tmp_install -czf /postgres_install.tar.gz .
+    && mold -run make -j $(nproc) -s neon-pg-ext \
+    && rm -rf pg_install/build \
+    && tar -C pg_install -czf /home/nonroot/postgres_install.tar.gz .
 
-# Build zenith binaries
-FROM neondatabase/rust:1.58 AS build
+# Build neon binaries
+FROM $REPOSITORY/$IMAGE:$TAG AS build
+WORKDIR /home/nonroot
 ARG GIT_VERSION=local
 
-ARG CACHEPOT_BUCKET=zenith-rust-cachepot
-ARG AWS_ACCESS_KEY_ID
-ARG AWS_SECRET_ACCESS_KEY
+# Enable https://github.com/paritytech/cachepot to cache Rust crates' compilation results in Docker builds.
+# Set up cachepot to use an AWS S3 bucket for cache results, to reuse it between `docker build` invocations.
+# cachepot falls back to local filesystem if S3 is misconfigured, not failing the build
+ARG RUSTC_WRAPPER=cachepot
+ENV AWS_REGION=eu-central-1
+ENV CACHEPOT_S3_KEY_PREFIX=cachepot
+ARG CACHEPOT_BUCKET=neon-github-dev
+#ARG AWS_ACCESS_KEY_ID
+#ARG AWS_SECRET_ACCESS_KEY
 
-COPY --from=pg-build /pg/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
+COPY --from=pg-build /home/nonroot/pg_install/v14/include/postgresql/server pg_install/v14/include/postgresql/server
+COPY --from=pg-build /home/nonroot/pg_install/v15/include/postgresql/server pg_install/v15/include/postgresql/server
 COPY . .
 
 # Show build caching stats to check if it was used in the end.
 # Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
 RUN set -e \
-    && sudo -E "PATH=$PATH" mold -run cargo build --release \
+    && mold -run cargo build --bin pageserver --bin pageserver_binutils --bin draw_timeline_dir --bin safekeeper --bin proxy --locked --release \
     && cachepot -s
 
 # Build final image
@@ -36,27 +53,37 @@ FROM debian:bullseye-slim
 WORKDIR /data
 
 RUN set -e \
-    && apt-get update \
-    && apt-get install -y \
+    && apt update \
+    && apt install -y \
         libreadline-dev \
         libseccomp-dev \
         openssl \
         ca-certificates \
     && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
-    && useradd -d /data zenith \
-    && chown -R zenith:zenith /data
+    && useradd -d /data neon \
+    && chown -R neon:neon /data
 
-COPY --from=build --chown=zenith:zenith /home/runner/target/release/pageserver /usr/local/bin
-COPY --from=build --chown=zenith:zenith /home/runner/target/release/safekeeper /usr/local/bin
-COPY --from=build --chown=zenith:zenith /home/runner/target/release/proxy /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver_binutils /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/draw_timeline_dir /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin
 
-COPY --from=pg-build /pg/tmp_install/ /usr/local/
-COPY --from=pg-build /postgres_install.tar.gz /data/
+COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/v14/
+COPY --from=pg-build /home/nonroot/pg_install/v15 /usr/local/v15/
+COPY --from=pg-build /home/nonroot/postgres_install.tar.gz /data/
 
-COPY docker-entrypoint.sh /docker-entrypoint.sh
+# By default, pageserver uses `.neon/` working directory in WORKDIR, so create one and fill it with the dummy config.
+# Now, when `docker run ... pageserver` is run, it can start without errors, yet will have some default dummy values.
+RUN mkdir -p /data/.neon/ && chown -R neon:neon /data/.neon/ \
+    && /usr/local/bin/pageserver -D /data/.neon/ --init \
+       -c "id=1234" \
+       -c "broker_endpoints=['http://etcd:2379']" \
+       -c "pg_distrib_dir='/usr/local/'" \
+       -c "listen_pg_addr='0.0.0.0:6400'" \
+       -c "listen_http_addr='0.0.0.0:9898'"
 
 VOLUME ["/data"]
-USER zenith
+USER neon
 EXPOSE 6400
-ENTRYPOINT ["/docker-entrypoint.sh"]
-CMD ["pageserver"]
+EXPOSE 9898
Dockerfile.compute-node-v14  (new file, 218 lines)
@@ -0,0 +1,218 @@
#
# This file is identical to the Dockerfile.compute-node-v15 file
# except for the version of Postgres that is built.
#

ARG TAG=pinned

#########################################################################################
#
# Layer "build-deps"
#
#########################################################################################
FROM debian:bullseye-slim AS build-deps
RUN apt update && \
    apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
    zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev

#########################################################################################
#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
#########################################################################################
FROM build-deps AS pg-build
COPY vendor/postgres-v14 postgres
RUN cd postgres && \
    ./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
    # Install headers
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install

#########################################################################################
#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
#########################################################################################
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.1.tar.gz && \
    tar xvzf postgis-3.3.1.tar.gz && \
    cd postgis-3.3.1 && \
    ./autogen.sh && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    ./configure && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    cd extensions/postgis && \
    make clean && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control

#########################################################################################
#
# Layer "plv8-build"
# Build plv8
#
#########################################################################################
FROM build-deps AS plv8-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5 binutils

# https://github.com/plv8/plv8/issues/475:
# v8 uses gold for linking and sets `--thread-count=4` which breaks
# gold version <= 1.35 (https://sourceware.org/bugzilla/show_bug.cgi?id=23607)
# Install newer gold version manually as debian-testing binutils version updates
# libc version, which in turn breaks other extension built against non-testing libc.
RUN wget https://ftp.gnu.org/gnu/binutils/binutils-2.38.tar.gz && \
    tar xvzf binutils-2.38.tar.gz && \
    cd binutils-2.38 && \
    cd libiberty && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && \
    cd ../bfd && ./configure && make bfdver.h && \
    cd ../gold && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && make install && \
    cp /usr/local/bin/ld.gold /usr/bin/gold

# Sed is used to patch for https://github.com/plv8/plv8/issues/503
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
    tar xvzf v3.1.4.tar.gz && \
    cd plv8-3.1.4 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
    make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
    rm -rf /plv8-* && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control

#########################################################################################
#
# Layer "h3-pg-build"
# Build h3_pg
#
#########################################################################################
FROM build-deps AS h3-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

# packaged cmake is too old
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
    -q -O /tmp/cmake-install.sh \
    && chmod u+x /tmp/cmake-install.sh \
    && /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
    && rm /tmp/cmake-install.sh

RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
    tar xvzf h3.tgz && \
    cd h3-4.0.1 && \
    mkdir build && \
    cd build && \
    cmake .. -DCMAKE_BUILD_TYPE=Release && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    DESTDIR=/h3 make install && \
    cp -R /h3/usr / && \
    rm -rf build

RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
    tar xvzf h3-pg.tgz && \
    cd h3-pg-4.0.1 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control

#########################################################################################
#
# Layer "neon-pg-ext-build"
# compile neon extensions
#
#########################################################################################
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY pgxn/ pgxn/

RUN make -j $(getconf _NPROCESSORS_ONLN) \
    PG_CONFIG=/usr/local/pgsql/bin/pg_config \
    -C pgxn/neon \
    -s install

#########################################################################################
#
# Compile and run the Neon-specific `compute_ctl` binary
#
#########################################################################################
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto

#########################################################################################
#
# Clean up postgres folder before inclusion
#
#########################################################################################
FROM neon-pg-ext-build AS postgres-cleanup-layer
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql

# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp

# Remove headers that we won't need anymore - we've completed installation of all extensions
RUN rm -r /usr/local/pgsql/include

# Remove now-useless PGXS src infrastructure
RUN rm -r /usr/local/pgsql/lib/pgxs/src

# Remove static postgresql libraries - all compilation is finished, so we
# can now remove these files - they must be included in other binaries by now
# if they were to be used by other libraries.
RUN rm /usr/local/pgsql/lib/lib*.a

#########################################################################################
#
# Final layer
# Put it all together into the final image
#
#########################################################################################
FROM debian:bullseye-slim
# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
    echo "postgres:test_console_pass" | chpasswd && \
    mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
    chown -R postgres:postgres /var/db/postgres && \
    chmod 0750 /var/db/postgres/compute && \
    echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
#   libreadline8 for psql
#   libossp-uuid16 for extension ossp-uuid
#   libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
#
# Lastly, link compute_ctl into zenith_ctl while we're at it,
# so that we don't need to put this in another layer.
RUN apt update && \
    apt install --no-install-recommends -y \
        libreadline8 \
        libossp-uuid16 \
        libgeos-c1v5 \
        libgdal28 \
        libproj19 \
        libprotobuf-c1 && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl

USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
Dockerfile.compute-node-v15  (new file, 218 lines)
@@ -0,0 +1,218 @@
#
# This file is identical to the Dockerfile.compute-node-v14 file
# except for the version of Postgres that is built.
#

ARG TAG=pinned

#########################################################################################
#
# Layer "build-deps"
#
#########################################################################################
FROM debian:bullseye-slim AS build-deps
RUN apt update && \
    apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
    zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev

#########################################################################################
#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
#########################################################################################
FROM build-deps AS pg-build
COPY vendor/postgres-v15 postgres
RUN cd postgres && \
    ./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
    # Install headers
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install

#########################################################################################
#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
#########################################################################################
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.1.tar.gz && \
    tar xvzf postgis-3.3.1.tar.gz && \
    cd postgis-3.3.1 && \
    ./autogen.sh && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    ./configure && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    cd extensions/postgis && \
    make clean && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control

#########################################################################################
#
# Layer "plv8-build"
# Build plv8
#
#########################################################################################
FROM build-deps AS plv8-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5 binutils

# https://github.com/plv8/plv8/issues/475:
# v8 uses gold for linking and sets `--thread-count=4` which breaks
# gold version <= 1.35 (https://sourceware.org/bugzilla/show_bug.cgi?id=23607)
# Install newer gold version manually as debian-testing binutils version updates
# libc version, which in turn breaks other extension built against non-testing libc.
RUN wget https://ftp.gnu.org/gnu/binutils/binutils-2.38.tar.gz && \
    tar xvzf binutils-2.38.tar.gz && \
    cd binutils-2.38 && \
|
||||||
|
cd libiberty && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
cd ../bfd && ./configure && make bfdver.h && \
|
||||||
|
cd ../gold && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && make install && \
|
||||||
|
cp /usr/local/bin/ld.gold /usr/bin/gold
|
||||||
|
|
||||||
|
# Sed is used to patch for https://github.com/plv8/plv8/issues/503
|
||||||
|
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
|
||||||
|
tar xvzf v3.1.4.tar.gz && \
|
||||||
|
cd plv8-3.1.4 && \
|
||||||
|
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||||
|
sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
|
||||||
|
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
|
rm -rf /plv8-* && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "h3-pg-build"
|
||||||
|
# Build h3_pg
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS h3-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
# packaged cmake is too old
|
||||||
|
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
|
||||||
|
-q -O /tmp/cmake-install.sh \
|
||||||
|
&& chmod u+x /tmp/cmake-install.sh \
|
||||||
|
&& /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
|
||||||
|
&& rm /tmp/cmake-install.sh
|
||||||
|
|
||||||
|
RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
|
||||||
|
tar xvzf h3.tgz && \
|
||||||
|
cd h3-4.0.1 && \
|
||||||
|
mkdir build && \
|
||||||
|
cd build && \
|
||||||
|
cmake .. -DCMAKE_BUILD_TYPE=Release && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
DESTDIR=/h3 make install && \
|
||||||
|
cp -R /h3/usr / && \
|
||||||
|
rm -rf build
|
||||||
|
|
||||||
|
RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
|
||||||
|
tar xvzf h3-pg.tgz && \
|
||||||
|
cd h3-pg-4.0.1 && \
|
||||||
|
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "neon-pg-ext-build"
|
||||||
|
# compile neon extensions
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS neon-pg-ext-build
|
||||||
|
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=h3-pg-build /h3/usr /
|
||||||
|
COPY pgxn/ pgxn/
|
||||||
|
|
||||||
|
RUN make -j $(getconf _NPROCESSORS_ONLN) \
|
||||||
|
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
|
||||||
|
-C pgxn/neon \
|
||||||
|
-s install
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Compile and run the Neon-specific `compute_ctl` binary
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
|
||||||
|
USER nonroot
|
||||||
|
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
|
||||||
|
COPY --chown=nonroot . .
|
||||||
|
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Clean up postgres folder before inclusion
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM neon-pg-ext-build AS postgres-cleanup-layer
|
||||||
|
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql
|
||||||
|
|
||||||
|
# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
|
||||||
|
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp
|
||||||
|
|
||||||
|
# Remove headers that we won't need anymore - we've completed installation of all extensions
|
||||||
|
RUN rm -r /usr/local/pgsql/include
|
||||||
|
|
||||||
|
# Remove now-useless PGXS src infrastructure
|
||||||
|
RUN rm -r /usr/local/pgsql/lib/pgxs/src
|
||||||
|
|
||||||
|
# Remove static postgresql libraries - all compilation is finished, so we
|
||||||
|
# can now remove these files - they must be included in other binaries by now
|
||||||
|
# if they were to be used by other libraries.
|
||||||
|
RUN rm /usr/local/pgsql/lib/lib*.a
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Final layer
|
||||||
|
# Put it all together into the final image
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM debian:bullseye-slim
|
||||||
|
# Add user postgres
|
||||||
|
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
|
||||||
|
echo "postgres:test_console_pass" | chpasswd && \
|
||||||
|
mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
|
||||||
|
chown -R postgres:postgres /var/db/postgres && \
|
||||||
|
chmod 0750 /var/db/postgres/compute && \
|
||||||
|
echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig
|
||||||
|
|
||||||
|
COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
|
||||||
|
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
|
||||||
|
|
||||||
|
# Install:
|
||||||
|
# libreadline8 for psql
|
||||||
|
# libossp-uuid16 for extension ossp-uuid
|
||||||
|
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
|
||||||
|
#
|
||||||
|
# Lastly, link compute_ctl into zenith_ctl while we're at it,
|
||||||
|
# so that we don't need to put this in another layer.
|
||||||
|
RUN apt update && \
|
||||||
|
apt install --no-install-recommends -y \
|
||||||
|
libreadline8 \
|
||||||
|
libossp-uuid16 \
|
||||||
|
libgeos-c1v5 \
|
||||||
|
libgdal28 \
|
||||||
|
libproj19 \
|
||||||
|
libprotobuf-c1 && \
|
||||||
|
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
|
||||||
|
ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl
|
||||||
|
|
||||||
|
USER postgres
|
||||||
|
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
|
||||||
@@ -1,18 +1,29 @@
|
|||||||
# First transient image to build compute_tools binaries
|
# First transient image to build compute_tools binaries
|
||||||
# NB: keep in sync with rust image version in .circle/config.yml
|
# NB: keep in sync with rust image version in .github/workflows/build_and_test.yml
|
||||||
FROM neondatabase/rust:1.58 AS rust-build
|
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
|
ARG IMAGE=rust
|
||||||
|
ARG TAG=pinned
|
||||||
|
|
||||||
ARG CACHEPOT_BUCKET=zenith-rust-cachepot
|
FROM $REPOSITORY/$IMAGE:$TAG AS rust-build
|
||||||
ARG AWS_ACCESS_KEY_ID
|
WORKDIR /home/nonroot
|
||||||
ARG AWS_SECRET_ACCESS_KEY
|
|
||||||
|
# Enable https://github.com/paritytech/cachepot to cache Rust crates' compilation results in Docker builds.
|
||||||
|
# Set up cachepot to use an AWS S3 bucket for cache results, to reuse it between `docker build` invocations.
|
||||||
|
# cachepot falls back to local filesystem if S3 is misconfigured, not failing the build.
|
||||||
|
ARG RUSTC_WRAPPER=cachepot
|
||||||
|
ENV AWS_REGION=eu-central-1
|
||||||
|
ENV CACHEPOT_S3_KEY_PREFIX=cachepot
|
||||||
|
ARG CACHEPOT_BUCKET=neon-github-dev
|
||||||
|
#ARG AWS_ACCESS_KEY_ID
|
||||||
|
#ARG AWS_SECRET_ACCESS_KEY
|
||||||
|
|
||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
RUN set -e \
|
RUN set -e \
|
||||||
&& sudo -E "PATH=$PATH" mold -run cargo build -p compute_tools --release \
|
&& mold -run cargo build -p compute_tools --locked --release \
|
||||||
&& cachepot -s
|
&& cachepot -s
|
||||||
|
|
||||||
# Final image that only has one binary
|
# Final image that only has one binary
|
||||||
FROM debian:buster-slim
|
FROM debian:bullseye-slim
|
||||||
|
|
||||||
COPY --from=rust-build /home/runner/target/release/compute_ctl /usr/local/bin/compute_ctl
|
COPY --from=rust-build /home/nonroot/target/release/compute_ctl /usr/local/bin/compute_ctl
|
||||||
|
|||||||
204
Makefile
204
Makefile
@@ -1,10 +1,7 @@
|
|||||||
# Seccomp BPF is only available for Linux
|
ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
|
||||||
UNAME_S := $(shell uname -s)
|
|
||||||
ifeq ($(UNAME_S),Linux)
|
# Where to install Postgres, default is ./pg_install, maybe useful for package managers
|
||||||
SECCOMP = --with-libseccomp
|
POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/
|
||||||
else
|
|
||||||
SECCOMP =
|
|
||||||
endif
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# We differentiate between release / debug build types using the BUILD_TYPE
|
# We differentiate between release / debug build types using the BUILD_TYPE
|
||||||
@@ -23,12 +20,26 @@ else
|
|||||||
$(error Bad build type '$(BUILD_TYPE)', see Makefile for options)
|
$(error Bad build type '$(BUILD_TYPE)', see Makefile for options)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# macOS with brew-installed openssl requires explicit paths
|
|
||||||
UNAME_S := $(shell uname -s)
|
UNAME_S := $(shell uname -s)
|
||||||
ifeq ($(UNAME_S),Darwin)
|
ifeq ($(UNAME_S),Linux)
|
||||||
PG_CONFIGURE_OPTS += --with-includes=$(HOMEBREW_PREFIX)/opt/openssl/include --with-libraries=$(HOMEBREW_PREFIX)/opt/openssl/lib
|
# Seccomp BPF is only available for Linux
|
||||||
|
PG_CONFIGURE_OPTS += --with-libseccomp
|
||||||
|
else ifeq ($(UNAME_S),Darwin)
|
||||||
|
# macOS with brew-installed openssl requires explicit paths
|
||||||
|
# It can be configured with OPENSSL_PREFIX variable
|
||||||
|
OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3)
|
||||||
|
PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib
|
||||||
|
# macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure
|
||||||
|
# brew formulae are keg-only and not symlinked into HOMEBREW_PREFIX, force their usage
|
||||||
|
EXTRA_PATH_OVERRIDES += $(shell brew --prefix bison)/bin/:$(shell brew --prefix flex)/bin/:
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
# Use -C option so that when PostgreSQL "make install" installs the
|
||||||
|
# headers, the mtime of the headers are not changed when there have
|
||||||
|
# been no changes to the files. Changing the mtime triggers an
|
||||||
|
# unnecessary rebuild of 'postgres_ffi'.
|
||||||
|
PG_CONFIGURE_OPTS += INSTALL='$(ROOT_PROJECT_DIR)/scripts/ninstall.sh -C'
|
||||||
|
|
||||||
# Choose whether we should be silent or verbose
|
# Choose whether we should be silent or verbose
|
||||||
CARGO_BUILD_FLAGS += --$(if $(filter s,$(MAKEFLAGS)),quiet,verbose)
|
CARGO_BUILD_FLAGS += --$(if $(filter s,$(MAKEFLAGS)),quiet,verbose)
|
||||||
# Fix for a corner case when make doesn't pass a jobserver
|
# Fix for a corner case when make doesn't pass a jobserver
|
||||||
@@ -41,69 +52,160 @@ CARGO_CMD_PREFIX += $(if $(filter n,$(MAKEFLAGS)),,+)
|
|||||||
CARGO_CMD_PREFIX += CARGO_TERM_PROGRESS_WHEN=never CI=1
|
CARGO_CMD_PREFIX += CARGO_TERM_PROGRESS_WHEN=never CI=1
|
||||||
|
|
||||||
#
|
#
|
||||||
# Top level Makefile to build Zenith and PostgreSQL
|
# Top level Makefile to build Neon and PostgreSQL
|
||||||
#
|
#
|
||||||
.PHONY: all
|
.PHONY: all
|
||||||
all: zenith postgres
|
all: neon postgres neon-pg-ext
|
||||||
|
|
||||||
### Zenith Rust bits
|
### Neon Rust bits
|
||||||
#
|
#
|
||||||
# The 'postgres_ffi' depends on the Postgres headers.
|
# The 'postgres_ffi' depends on the Postgres headers.
|
||||||
.PHONY: zenith
|
.PHONY: neon
|
||||||
zenith: postgres-headers
|
neon: postgres-v14-headers postgres-v15-headers
|
||||||
+@echo "Compiling Zenith"
|
+@echo "Compiling Neon"
|
||||||
$(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS)
|
$(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS)
|
||||||
|
|
||||||
### PostgreSQL parts
|
### PostgreSQL parts
|
||||||
tmp_install/build/config.status:
|
# The rules are duplicated for Postgres v14 and 15. We may want to refactor
|
||||||
+@echo "Configuring postgres build"
|
# to avoid the duplication in the future, but it's tolerable for now.
|
||||||
mkdir -p tmp_install/build
|
#
|
||||||
(cd tmp_install/build && \
|
$(POSTGRES_INSTALL_DIR)/build/v14/config.status:
|
||||||
../../vendor/postgres/configure CFLAGS='$(PG_CFLAGS)' \
|
+@echo "Configuring Postgres v14 build"
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/v14
|
||||||
|
(cd $(POSTGRES_INSTALL_DIR)/build/v14 && \
|
||||||
|
env PATH="$(EXTRA_PATH_OVERRIDES):$$PATH" $(ROOT_PROJECT_DIR)/vendor/postgres-v14/configure \
|
||||||
|
CFLAGS='$(PG_CFLAGS)' \
|
||||||
$(PG_CONFIGURE_OPTS) \
|
$(PG_CONFIGURE_OPTS) \
|
||||||
$(SECCOMP) \
|
--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/v14 > configure.log)
|
||||||
--prefix=$(abspath tmp_install) > configure.log)
|
|
||||||
|
|
||||||
# nicer alias for running 'configure'
|
$(POSTGRES_INSTALL_DIR)/build/v15/config.status:
|
||||||
.PHONY: postgres-configure
|
+@echo "Configuring Postgres v15 build"
|
||||||
postgres-configure: tmp_install/build/config.status
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/v15
|
||||||
|
(cd $(POSTGRES_INSTALL_DIR)/build/v15 && \
|
||||||
|
env PATH="$(EXTRA_PATH_OVERRIDES):$$PATH" $(ROOT_PROJECT_DIR)/vendor/postgres-v15/configure \
|
||||||
|
CFLAGS='$(PG_CFLAGS)' \
|
||||||
|
$(PG_CONFIGURE_OPTS) \
|
||||||
|
--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/v15 > configure.log)
|
||||||
|
|
||||||
# Install the PostgreSQL header files into tmp_install/include
|
# nicer alias to run 'configure'
|
||||||
.PHONY: postgres-headers
|
.PHONY: postgres-v14-configure
|
||||||
postgres-headers: postgres-configure
|
postgres-v14-configure: $(POSTGRES_INSTALL_DIR)/build/v14/config.status
|
||||||
+@echo "Installing PostgreSQL headers"
|
|
||||||
$(MAKE) -C tmp_install/build/src/include MAKELEVEL=0 install
|
|
||||||
|
|
||||||
# Compile and install PostgreSQL and contrib/neon
|
.PHONY: postgres-v15-configure
|
||||||
.PHONY: postgres
|
postgres-v15-configure: $(POSTGRES_INSTALL_DIR)/build/v15/config.status
|
||||||
postgres: postgres-configure \
|
|
||||||
postgres-headers # to prevent `make install` conflicts with zenith's `postgres-headers`
|
|
||||||
+@echo "Compiling PostgreSQL"
|
|
||||||
$(MAKE) -C tmp_install/build MAKELEVEL=0 install
|
|
||||||
+@echo "Compiling contrib/neon"
|
|
||||||
$(MAKE) -C tmp_install/build/contrib/neon install
|
|
||||||
+@echo "Compiling contrib/neon_test_utils"
|
|
||||||
$(MAKE) -C tmp_install/build/contrib/neon_test_utils install
|
|
||||||
+@echo "Compiling pg_buffercache"
|
|
||||||
$(MAKE) -C tmp_install/build/contrib/pg_buffercache install
|
|
||||||
+@echo "Compiling pageinspect"
|
|
||||||
$(MAKE) -C tmp_install/build/contrib/pageinspect install
|
|
||||||
|
|
||||||
|
# Install the PostgreSQL header files into $(POSTGRES_INSTALL_DIR)/<version>/include
|
||||||
|
.PHONY: postgres-v14-headers
|
||||||
|
postgres-v14-headers: postgres-v14-configure
|
||||||
|
+@echo "Installing PostgreSQL v14 headers"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/src/include MAKELEVEL=0 install
|
||||||
|
|
||||||
.PHONY: postgres-clean
|
.PHONY: postgres-v15-headers
|
||||||
postgres-clean:
|
postgres-v15-headers: postgres-v15-configure
|
||||||
$(MAKE) -C tmp_install/build MAKELEVEL=0 clean
|
+@echo "Installing PostgreSQL v15 headers"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/src/include MAKELEVEL=0 install
|
||||||
|
|
||||||
|
# Compile and install PostgreSQL
|
||||||
|
.PHONY: postgres-v14
|
||||||
|
postgres-v14: postgres-v14-configure \
|
||||||
|
postgres-v14-headers # to prevent `make install` conflicts with neon's `postgres-headers`
|
||||||
|
+@echo "Compiling PostgreSQL v14"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14 MAKELEVEL=0 install
|
||||||
|
+@echo "Compiling libpq v14"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/src/interfaces/libpq install
|
||||||
|
+@echo "Compiling pg_prewarm v14"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pg_prewarm install
|
||||||
|
+@echo "Compiling pg_buffercache v14"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pg_buffercache install
|
||||||
|
+@echo "Compiling pageinspect v14"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pageinspect install
|
||||||
|
|
||||||
|
.PHONY: postgres-v15
|
||||||
|
postgres-v15: postgres-v15-configure \
|
||||||
|
postgres-v15-headers # to prevent `make install` conflicts with neon's `postgres-headers`
|
||||||
|
+@echo "Compiling PostgreSQL v15"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15 MAKELEVEL=0 install
|
||||||
|
+@echo "Compiling libpq v15"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/src/interfaces/libpq install
|
||||||
|
+@echo "Compiling pg_prewarm v15"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pg_prewarm install
|
||||||
|
+@echo "Compiling pg_buffercache v15"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pg_buffercache install
|
||||||
|
+@echo "Compiling pageinspect v15"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pageinspect install
|
||||||
|
|
||||||
|
# shorthand to build all Postgres versions
|
||||||
|
postgres: postgres-v14 postgres-v15
|
||||||
|
|
||||||
|
.PHONY: postgres-v14-clean
|
||||||
|
postgres-v14-clean:
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14 MAKELEVEL=0 clean
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pg_buffercache clean
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pageinspect clean
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/src/interfaces/libpq clean
|
||||||
|
|
||||||
|
.PHONY: postgres-v15-clean
|
||||||
|
postgres-v15-clean:
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15 MAKELEVEL=0 clean
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pg_buffercache clean
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pageinspect clean
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/src/interfaces/libpq clean
|
||||||
|
|
||||||
|
neon-pg-ext-v14: postgres-v14
|
||||||
|
+@echo "Compiling neon v14"
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-v14
|
||||||
|
(cd $(POSTGRES_INSTALL_DIR)/build/neon-v14 && \
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v14/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install)
|
||||||
|
+@echo "Compiling neon_walredo v14"
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-walredo-v14
|
||||||
|
(cd $(POSTGRES_INSTALL_DIR)/build/neon-walredo-v14 && \
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v14/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile install)
|
||||||
|
+@echo "Compiling neon_test_utils" v14
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v14
|
||||||
|
(cd $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v14 && \
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v14/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install)
|
||||||
|
|
||||||
|
neon-pg-ext-v15: postgres-v15
|
||||||
|
+@echo "Compiling neon v15"
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-v15
|
||||||
|
(cd $(POSTGRES_INSTALL_DIR)/build/neon-v15 && \
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v15/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install)
|
||||||
|
+@echo "Compiling neon_walredo v15"
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-walredo-v15
|
||||||
|
(cd $(POSTGRES_INSTALL_DIR)/build/neon-walredo-v15 && \
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v15/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile install)
|
||||||
|
+@echo "Compiling neon_test_utils" v15
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v15
|
||||||
|
(cd $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v15 && \
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v15/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install)
|
||||||
|
|
||||||
|
.PHONY: neon-pg-ext-clean
|
||||||
|
$(MAKE) -C $(ROOT_PROJECT_DIR)/pgxn/neon clean
|
||||||
|
$(MAKE) -C $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils clean
|
||||||
|
|
||||||
|
neon-pg-ext: neon-pg-ext-v14 neon-pg-ext-v15
|
||||||
|
postgres-headers: postgres-v14-headers postgres-v15-headers
|
||||||
|
postgres-clean: postgres-v14-clean postgres-v15-clean
|
||||||
|
|
||||||
# This doesn't remove the effects of 'configure'.
|
# This doesn't remove the effects of 'configure'.
|
||||||
.PHONY: clean
|
.PHONY: clean
|
||||||
clean:
|
clean:
|
||||||
cd tmp_install/build && $(MAKE) clean
|
cd $(POSTGRES_INSTALL_DIR)/build/v14 && $(MAKE) clean
|
||||||
|
cd $(POSTGRES_INSTALL_DIR)/build/v15 && $(MAKE) clean
|
||||||
$(CARGO_CMD_PREFIX) cargo clean
|
$(CARGO_CMD_PREFIX) cargo clean
|
||||||
|
cd pgxn/neon && $(MAKE) clean
|
||||||
|
cd pgxn/neon_test_utils && $(MAKE) clean
|
||||||
|
|
||||||
# This removes everything
|
# This removes everything
|
||||||
.PHONY: distclean
|
.PHONY: distclean
|
||||||
distclean:
|
distclean:
|
||||||
rm -rf tmp_install
|
rm -rf $(POSTGRES_INSTALL_DIR)
|
||||||
$(CARGO_CMD_PREFIX) cargo clean
|
$(CARGO_CMD_PREFIX) cargo clean
|
||||||
|
|
||||||
.PHONY: fmt
|
.PHONY: fmt
|
||||||
@@ -112,4 +214,4 @@ fmt:
|
|||||||
|
|
||||||
.PHONY: setup-pre-commit-hook
|
.PHONY: setup-pre-commit-hook
|
||||||
setup-pre-commit-hook:
|
setup-pre-commit-hook:
|
||||||
ln -s -f ../../pre-commit.py .git/hooks/pre-commit
|
ln -s -f $(ROOT_PROJECT_DIR)/pre-commit.py .git/hooks/pre-commit
|
||||||
|
|||||||
4
NOTICE
4
NOTICE
@@ -1,5 +1,5 @@
|
|||||||
Neon
|
Neon
|
||||||
Copyright 2022 Neon Inc.
|
Copyright 2022 Neon Inc.
|
||||||
|
|
||||||
The PostgreSQL submodule in vendor/postgres is licensed under the
|
The PostgreSQL submodules in vendor/postgres-v14 and vendor/postgres-v15 are licensed under the
|
||||||
PostgreSQL license. See vendor/postgres/COPYRIGHT.
|
PostgreSQL license. See vendor/postgres-v14/COPYRIGHT and vendor/postgres-v15/COPYRIGHT.
|
||||||
|
|||||||
102
README.md
102
README.md
@@ -1,6 +1,6 @@
|
|||||||
# Neon
|
# Neon
|
||||||
|
|
||||||
Neon is a serverless open source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes PostgreSQL storage layer by redistributing data across a cluster of nodes.
|
Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes.
|
||||||
|
|
||||||
The project used to be called "Zenith". Many of the commands and code comments
|
The project used to be called "Zenith". Many of the commands and code comments
|
||||||
still refer to "zenith", but we are in the process of renaming things.
|
still refer to "zenith", but we are in the process of renaming things.
|
||||||
@@ -12,32 +12,32 @@ Alternatively, compile and run the project [locally](#running-local-installation
|
|||||||
|
|
||||||
## Architecture overview
|
## Architecture overview
|
||||||
|
|
||||||
A Neon installation consists of compute nodes and Neon storage engine.
|
A Neon installation consists of compute nodes and a Neon storage engine.
|
||||||
|
|
||||||
Compute nodes are stateless PostgreSQL nodes, backed by Neon storage engine.
|
Compute nodes are stateless PostgreSQL nodes backed by the Neon storage engine.
|
||||||
|
|
||||||
Neon storage engine consists of two major components:
|
The Neon storage engine consists of two major components:
|
||||||
- Pageserver. Scalable storage backend for compute nodes.
|
- Pageserver. Scalable storage backend for the compute nodes.
|
||||||
- WAL service. The service that receives WAL from compute node and ensures that it is stored durably.
|
- WAL service. The service receives WAL from the compute node and ensures that it is stored durably.
|
||||||
|
|
||||||
Pageserver consists of:
|
Pageserver consists of:
|
||||||
- Repository - Neon storage implementation.
|
- Repository - Neon storage implementation.
|
||||||
- WAL receiver - service that receives WAL from WAL service and stores it in the repository.
|
- WAL receiver - service that receives WAL from WAL service and stores it in the repository.
|
||||||
- Page service - service that communicates with compute nodes and responds with pages from the repository.
|
- Page service - service that communicates with compute nodes and responds with pages from the repository.
|
||||||
- WAL redo - service that builds pages from base images and WAL records on Page service request.
|
- WAL redo - service that builds pages from base images and WAL records on Page service request
|
||||||
|
|
||||||
## Running local installation
|
## Running local installation
|
||||||
|
|
||||||
|
|
||||||
#### Installing dependencies on Linux
|
#### Installing dependencies on Linux
|
||||||
1. Install build dependencies and other useful packages
|
1. Install build dependencies and other applicable packages
|
||||||
|
|
||||||
* On Ubuntu or Debian this set of packages should be sufficient to build the code:
|
* On Ubuntu or Debian, this set of packages should be sufficient to build the code:
|
||||||
```bash
|
```bash
|
||||||
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
|
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
|
||||||
libssl-dev clang pkg-config libpq-dev etcd cmake postgresql-client
|
libssl-dev clang pkg-config libpq-dev etcd cmake postgresql-client
|
||||||
```
|
```
|
||||||
* On Fedora these packages are needed:
|
* On Fedora, these packages are needed:
|
||||||
```bash
|
```bash
|
||||||
dnf install flex bison readline-devel zlib-devel openssl-devel \
|
dnf install flex bison readline-devel zlib-devel openssl-devel \
|
||||||
libseccomp-devel perl clang cmake etcd postgresql postgresql-contrib
|
libseccomp-devel perl clang cmake etcd postgresql postgresql-contrib
|
||||||
@@ -53,7 +53,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
|||||||
1. Install XCode and dependencies
|
1. Install XCode and dependencies
|
||||||
```
|
```
|
||||||
xcode-select --install
|
xcode-select --install
|
||||||
brew install protobuf etcd openssl
|
brew install protobuf etcd openssl flex bison
|
||||||
```
|
```
|
||||||
|
|
||||||
2. [Install Rust](https://www.rust-lang.org/tools/install)
|
2. [Install Rust](https://www.rust-lang.org/tools/install)
|
||||||
@@ -69,7 +69,18 @@ brew install libpq
|
|||||||
brew link --force libpq
|
brew link --force libpq
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Building on Linux and OSX
|
#### Rustc version
|
||||||
|
|
||||||
|
The project uses [rust toolchain file](./rust-toolchain.toml) to define the version it's built with in CI for testing and local builds.
|
||||||
|
|
||||||
|
This file is automatically picked up by [`rustup`](https://rust-lang.github.io/rustup/overrides.html#the-toolchain-file) that installs (if absent) and uses the toolchain version pinned in the file.
|
||||||
|
|
||||||
|
rustup users who want to build with another toolchain can use [`rustup override`](https://rust-lang.github.io/rustup/overrides.html#directory-overrides) command to set a specific toolchain for the project's directory.
|
||||||
|
|
||||||
|
non-rustup users most probably are not getting the same toolchain automatically from the file, so are responsible to manually verify their toolchain matches the version in the file.
|
||||||
|
Newer rustc versions most probably will work fine, yet older ones might not be supported due to some new features used by the project or the crates.
|
||||||
|
|
||||||
|
#### Building on Linux
|
||||||
|
|
||||||
1. Build neon and patched postgres
|
1. Build neon and patched postgres
|
||||||
```
|
```
|
||||||
@@ -78,42 +89,59 @@ brew link --force libpq
|
|||||||
git clone --recursive https://github.com/neondatabase/neon.git
|
git clone --recursive https://github.com/neondatabase/neon.git
|
||||||
cd neon
|
cd neon
|
||||||
|
|
||||||
# The preferred and default is to make a debug build. This will create a
|
# The preferred and default is to make a debug build. This will create a
|
||||||
# demonstrably slower build than a release build. If you want to use a release
|
# demonstrably slower build than a release build. For a release build,
|
||||||
# build, utilize "`BUILD_TYPE=release make -j`nproc``"
|
# use "BUILD_TYPE=release make -j`nproc`"
|
||||||
|
|
||||||
make -j`nproc`
|
make -j`nproc`
|
||||||
```
|
```
|
||||||
|
|
||||||
#### dependency installation notes
|
#### Building on OSX
|
||||||
To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `tmp_install/bin` and `tmp_install/lib`, respectively.
|
|
||||||
|
1. Build neon and patched postgres
|
||||||
|
```
|
||||||
|
# Note: The path to the neon sources can not contain a space.
|
||||||
|
|
||||||
|
git clone --recursive https://github.com/neondatabase/neon.git
|
||||||
|
cd neon
|
||||||
|
|
||||||
|
# The preferred and default is to make a debug build. This will create a
|
||||||
|
# demonstrably slower build than a release build. For a release build,
|
||||||
|
# use "BUILD_TYPE=release make -j`sysctl -n hw.logicalcpu`"
|
||||||
|
|
||||||
|
make -j`sysctl -n hw.logicalcpu`
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Dependency installation notes
|
||||||
|
To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively.
|
||||||
|
|
||||||
To run the integration tests or Python scripts (not required to use the code), install
|
To run the integration tests or Python scripts (not required to use the code), install
|
||||||
Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires poetry) in the project directory.
|
Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry](https://python-poetry.org/)) in the project directory.
|
||||||
|
|
||||||
|
|
||||||
#### running neon database
|
#### Running neon database
|
||||||
1. Start pageserver and postgres on top of it (should be called from repo root):
|
1. Start pageserver and postgres on top of it (should be called from repo root):
|
||||||
```sh
|
```sh
|
||||||
# Create repository in .neon with proper paths to binaries and data
|
# Create repository in .neon with proper paths to binaries and data
|
||||||
# Later that would be responsibility of a package install script
|
# Later that would be responsibility of a package install script
|
||||||
> ./target/debug/neon_local init
|
> ./target/debug/neon_local init
|
||||||
initializing tenantid 9ef87a5bf0d92544f6fafeeb3239695c
|
Starting pageserver at '127.0.0.1:64000' in '.neon'.
|
||||||
created initial timeline de200bd42b49cc1814412c7e592dd6e9 timeline.lsn 0/16B5A50
|
pageserver started, pid: 2545906
|
||||||
initial timeline de200bd42b49cc1814412c7e592dd6e9 created
|
Successfully initialized timeline de200bd42b49cc1814412c7e592dd6e9
|
||||||
pageserver init succeeded
|
Stopped pageserver 1 process with pid 2545906
|
||||||
|
|
||||||
# start pageserver and safekeeper
|
# start pageserver and safekeeper
|
||||||
> ./target/debug/neon_local start
|
> ./target/debug/neon_local start
|
||||||
Starting pageserver at '127.0.0.1:64000' in '.neon'
|
Starting etcd broker using "/usr/bin/etcd"
|
||||||
Pageserver started
|
etcd started, pid: 2545996
|
||||||
initializing for sk 1 for 7676
|
Starting pageserver at '127.0.0.1:64000' in '.neon'.
|
||||||
Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'
|
pageserver started, pid: 2546005
|
||||||
Safekeeper started
|
Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'.
|
||||||
|
safekeeper 1 started, pid: 2546041
|
||||||
|
|
||||||
# start postgres compute node
|
# start postgres compute node
|
||||||
> ./target/debug/neon_local pg start main
|
> ./target/debug/neon_local pg start main
|
||||||
Starting new postgres main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
|
Starting new postgres (v14) main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
|
||||||
Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/main port=55432
|
Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/main port=55432
|
||||||
Starting postgres node at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres'
|
Starting postgres node at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres'
|
||||||
|
|
||||||
@@ -123,7 +151,7 @@ Starting postgres node at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=pos
|
|||||||
main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16B5BA8 running
|
main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16B5BA8 running
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Now it is possible to connect to postgres and run some queries:
|
2. Now, it is possible to connect to postgres and run some queries:
|
||||||
```text
|
```text
|
||||||
> psql -p55432 -h 127.0.0.1 -U cloud_admin postgres
|
> psql -p55432 -h 127.0.0.1 -U cloud_admin postgres
|
||||||
postgres=# CREATE TABLE t(key int primary key, value text);
|
postgres=# CREATE TABLE t(key int primary key, value text);
|
||||||
@@ -181,17 +209,21 @@ postgres=# select * from t;
|
|||||||
(1 row)
|
(1 row)
|
||||||
```
|
```
|
||||||
|
|
||||||
4. If you want to run tests afterwards (see below), you have to stop all the running the pageserver, safekeeper and postgres instances
|
4. If you want to run tests afterward (see below), you must stop all the running of the pageserver, safekeeper, and postgres instances
|
||||||
you have just started. You can stop them all with one command:
|
you have just started. You can terminate them all with one command:
|
||||||
```sh
|
```sh
|
||||||
> ./target/debug/neon_local stop
|
> ./target/debug/neon_local stop
|
||||||
```
|
```
|
||||||
|
|
||||||
## Running tests
|
## Running tests
|
||||||
|
|
||||||
|
Ensure your dependencies are installed as described [here](https://github.com/neondatabase/neon#dependency-installation-notes).
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
git clone --recursive https://github.com/neondatabase/neon.git
|
git clone --recursive https://github.com/neondatabase/neon.git
|
||||||
make # builds also postgres and installs it to ./tmp_install
|
|
||||||
|
CARGO_BUILD_FLAGS="--features=testing" make
|
||||||
|
|
||||||
./scripts/pytest
|
./scripts/pytest
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -205,8 +237,8 @@ To view your `rustdoc` documentation in a browser, try running `cargo doc --no-d
|
|||||||
|
|
||||||
### Postgres-specific terms
|
### Postgres-specific terms
|
||||||
|
|
||||||
Due to Neon's very close relation with PostgreSQL internals, there are numerous specific terms used.
|
Due to Neon's very close relation with PostgreSQL internals, numerous specific terms are used.
|
||||||
Same applies to certain spelling: i.e. we use MB to denote 1024 * 1024 bytes, while MiB would be technically more correct, it's inconsistent with what PostgreSQL code and its documentation use.
|
The same applies to certain spelling: i.e. we use MB to denote 1024 * 1024 bytes, while MiB would be technically more correct, it's inconsistent with what PostgreSQL code and its documentation use.
|
||||||
|
|
||||||
To get more familiar with this aspect, refer to:
|
To get more familiar with this aspect, refer to:
|
||||||
|
|
||||||
|
|||||||
188
cli-v2-story.md
188
cli-v2-story.md
@@ -1,188 +0,0 @@
|
|||||||
Create a new Zenith repository in the current directory:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli init
|
|
||||||
The files belonging to this database system will be owned by user "heikki".
|
|
||||||
This user must also own the server process.
|
|
||||||
|
|
||||||
The database cluster will be initialized with locale "en_GB.UTF-8".
|
|
||||||
The default database encoding has accordingly been set to "UTF8".
|
|
||||||
The default text search configuration will be set to "english".
|
|
||||||
|
|
||||||
Data page checksums are disabled.
|
|
||||||
|
|
||||||
creating directory tmp ... ok
|
|
||||||
creating subdirectories ... ok
|
|
||||||
selecting dynamic shared memory implementation ... posix
|
|
||||||
selecting default max_connections ... 100
|
|
||||||
selecting default shared_buffers ... 128MB
|
|
||||||
selecting default time zone ... Europe/Helsinki
|
|
||||||
creating configuration files ... ok
|
|
||||||
running bootstrap script ... ok
|
|
||||||
performing post-bootstrap initialization ... ok
|
|
||||||
syncing data to disk ... ok
|
|
||||||
|
|
||||||
initdb: warning: enabling "trust" authentication for local connections
|
|
||||||
You can change this by editing pg_hba.conf or using the option -A, or
|
|
||||||
--auth-local and --auth-host, the next time you run initdb.
|
|
||||||
new zenith repository was created in .zenith
|
|
||||||
|
|
||||||
Initially, there is only one branch:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch
|
|
||||||
main
|
|
||||||
|
|
||||||
Start a local Postgres instance on the branch:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start main
|
|
||||||
Creating data directory from snapshot at 0/15FFB08...
|
|
||||||
waiting for server to start....2021-04-13 09:27:43.919 EEST [984664] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
|
|
||||||
2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv6 address "::1", port 5432
|
|
||||||
2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv4 address "127.0.0.1", port 5432
|
|
||||||
2021-04-13 09:27:43.927 EEST [984664] LOG: listening on Unix socket "/tmp/.s.PGSQL.5432"
|
|
||||||
2021-04-13 09:27:43.939 EEST [984665] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
|
|
||||||
2021-04-13 09:27:43.939 EEST [984665] LOG: creating missing WAL directory "pg_wal/archive_status"
|
|
||||||
2021-04-13 09:27:44.189 EEST [984665] LOG: database system was not properly shut down; automatic recovery in progress
|
|
||||||
2021-04-13 09:27:44.195 EEST [984665] LOG: invalid record length at 0/15FFB80: wanted 24, got 0
|
|
||||||
2021-04-13 09:27:44.195 EEST [984665] LOG: redo is not required
|
|
||||||
2021-04-13 09:27:44.225 EEST [984664] LOG: database system is ready to accept connections
|
|
||||||
done
|
|
||||||
server started
|
|
||||||
|
|
||||||
Run some commands against it:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "create table foo (t text);"
|
|
||||||
CREATE TABLE
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "insert into foo values ('inserted on the main branch');"
|
|
||||||
INSERT 0 1
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
Create a new branch called 'experimental'. We create it from the
|
|
||||||
current end of the 'main' branch, but you could specify a different
|
|
||||||
LSN as the start point instead.
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch experimental main
|
|
||||||
branching at end of WAL: 0/161F478
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch
|
|
||||||
experimental
|
|
||||||
main
|
|
||||||
|
|
||||||
Start another Postgres instance off the 'experimental' branch:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433
|
|
||||||
Creating data directory from snapshot at 0/15FFB08...
|
|
||||||
waiting for server to start....2021-04-13 09:28:41.874 EEST [984766] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
|
|
||||||
2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv6 address "::1", port 5433
|
|
||||||
2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv4 address "127.0.0.1", port 5433
|
|
||||||
2021-04-13 09:28:41.883 EEST [984766] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433"
|
|
||||||
2021-04-13 09:28:41.896 EEST [984767] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
|
|
||||||
2021-04-13 09:28:42.265 EEST [984767] LOG: database system was not properly shut down; automatic recovery in progress
|
|
||||||
2021-04-13 09:28:42.269 EEST [984767] LOG: redo starts at 0/15FFB80
|
|
||||||
2021-04-13 09:28:42.272 EEST [984767] LOG: invalid record length at 0/161F4B0: wanted 24, got 0
|
|
||||||
2021-04-13 09:28:42.272 EEST [984767] LOG: redo done at 0/161F478 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
|
|
||||||
2021-04-13 09:28:42.321 EEST [984766] LOG: database system is ready to accept connections
|
|
||||||
done
|
|
||||||
server started
|
|
||||||
|
|
||||||
Insert some a row on the 'experimental' branch:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "insert into foo values ('inserted on experimental')"
|
|
||||||
INSERT 0 1
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
inserted on experimental
|
|
||||||
(2 rows)
|
|
||||||
|
|
||||||
See that the other Postgres instance is still running on 'main' branch on port 5432:
|
|
||||||
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5432 -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Everything is stored in the .zenith directory:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/
|
|
||||||
total 12
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 datadirs
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:27 refs
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 timelines
|
|
||||||
|
|
||||||
The 'datadirs' directory contains the datadirs of the running instances:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/
|
|
||||||
total 8
|
|
||||||
drwx------ 18 heikki heikki 4096 Apr 13 09:27 3c0c634c1674079b2c6d4edf7c91523e
|
|
||||||
drwx------ 18 heikki heikki 4096 Apr 13 09:28 697e3c103d4b1763cd6e82e4ff361d76
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/3c0c634c1674079b2c6d4edf7c91523e/
|
|
||||||
total 124
|
|
||||||
drwxr-xr-x 5 heikki heikki 4096 Apr 13 09:27 base
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 global
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_commit_ts
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_dynshmem
|
|
||||||
-rw------- 1 heikki heikki 4760 Apr 13 09:27 pg_hba.conf
|
|
||||||
-rw------- 1 heikki heikki 1636 Apr 13 09:27 pg_ident.conf
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:32 pg_logical
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:27 pg_multixact
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_notify
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_replslot
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_serial
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_snapshots
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_stat
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:34 pg_stat_tmp
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_subtrans
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_tblspc
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_twophase
|
|
||||||
-rw------- 1 heikki heikki 3 Apr 13 09:27 PG_VERSION
|
|
||||||
lrwxrwxrwx 1 heikki heikki 52 Apr 13 09:27 pg_wal -> ../../timelines/3c0c634c1674079b2c6d4edf7c91523e/wal
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_xact
|
|
||||||
-rw------- 1 heikki heikki 88 Apr 13 09:27 postgresql.auto.conf
|
|
||||||
-rw------- 1 heikki heikki 28688 Apr 13 09:27 postgresql.conf
|
|
||||||
-rw------- 1 heikki heikki 96 Apr 13 09:27 postmaster.opts
|
|
||||||
-rw------- 1 heikki heikki 149 Apr 13 09:27 postmaster.pid
|
|
||||||
|
|
||||||
Note how 'pg_wal' is just a symlink to the 'timelines' directory. The
|
|
||||||
datadir is ephemeral, you can delete it at any time, and it can be reconstructed
|
|
||||||
from the snapshots and WAL stored in the 'timelines' directory. So if you push/pull
|
|
||||||
the repository, the 'datadirs' are not included. (They are like git working trees)
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ killall -9 postgres
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ rm -rf .zenith/datadirs/*
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433
|
|
||||||
Creating data directory from snapshot at 0/15FFB08...
|
|
||||||
waiting for server to start....2021-04-13 09:37:05.476 EEST [985340] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
|
|
||||||
2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv6 address "::1", port 5433
|
|
||||||
2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv4 address "127.0.0.1", port 5433
|
|
||||||
2021-04-13 09:37:05.487 EEST [985340] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433"
|
|
||||||
2021-04-13 09:37:05.498 EEST [985341] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
|
|
||||||
2021-04-13 09:37:05.808 EEST [985341] LOG: database system was not properly shut down; automatic recovery in progress
|
|
||||||
2021-04-13 09:37:05.813 EEST [985341] LOG: redo starts at 0/15FFB80
|
|
||||||
2021-04-13 09:37:05.815 EEST [985341] LOG: invalid record length at 0/161F770: wanted 24, got 0
|
|
||||||
2021-04-13 09:37:05.815 EEST [985341] LOG: redo done at 0/161F738 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
|
|
||||||
2021-04-13 09:37:05.866 EEST [985340] LOG: database system is ready to accept connections
|
|
||||||
done
|
|
||||||
server started
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
inserted on experimental
|
|
||||||
(2 rows)
|
|
||||||
|
|
||||||
@@ -4,19 +4,20 @@ version = "0.1.0"
|
|||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
libc = "0.2"
|
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
chrono = "0.4"
|
chrono = "0.4"
|
||||||
clap = "3.0"
|
clap = "4.0"
|
||||||
env_logger = "0.9"
|
env_logger = "0.9"
|
||||||
|
futures = "0.3.13"
|
||||||
hyper = { version = "0.14", features = ["full"] }
|
hyper = { version = "0.14", features = ["full"] }
|
||||||
log = { version = "0.4", features = ["std", "serde"] }
|
log = { version = "0.4", features = ["std", "serde"] }
|
||||||
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
notify = "5.0.0"
|
||||||
|
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
||||||
regex = "1"
|
regex = "1"
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
serde_json = "1"
|
serde_json = "1"
|
||||||
tar = "0.4"
|
tar = "0.4"
|
||||||
tokio = { version = "1.17", features = ["macros", "rt", "rt-multi-thread"] }
|
tokio = { version = "1.17", features = ["macros", "rt", "rt-multi-thread"] }
|
||||||
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
||||||
url = "2.2.2"
|
url = "2.2.2"
|
||||||
workspace_hack = { version = "0.1", path = "../workspace_hack" }
|
workspace_hack = { version = "0.1", path = "../workspace_hack" }
|
||||||
|
|||||||
@@ -51,53 +51,19 @@ fn main() -> Result<()> {
|
|||||||
// TODO: re-use `utils::logging` later
|
// TODO: re-use `utils::logging` later
|
||||||
     init_logger(DEFAULT_LOG_LEVEL)?;

-    // Env variable is set by `cargo`
-    let version: Option<&str> = option_env!("CARGO_PKG_VERSION");
-
-    let matches = clap::App::new("compute_ctl")
-        .version(version.unwrap_or("unknown"))
-        .arg(
-            Arg::new("connstr")
-                .short('C')
-                .long("connstr")
-                .value_name("DATABASE_URL")
-                .required(true),
-        )
-        .arg(
-            Arg::new("pgdata")
-                .short('D')
-                .long("pgdata")
-                .value_name("DATADIR")
-                .required(true),
-        )
-        .arg(
-            Arg::new("pgbin")
-                .short('b')
-                .long("pgbin")
-                .value_name("POSTGRES_PATH"),
-        )
-        .arg(
-            Arg::new("spec")
-                .short('s')
-                .long("spec")
-                .value_name("SPEC_JSON"),
-        )
-        .arg(
-            Arg::new("spec-path")
-                .short('S')
-                .long("spec-path")
-                .value_name("SPEC_PATH"),
-        )
-        .get_matches();
+    let matches = cli().get_matches();

-    let pgdata = matches.value_of("pgdata").expect("PGDATA path is required");
+    let pgdata = matches
+        .get_one::<String>("pgdata")
+        .expect("PGDATA path is required");
     let connstr = matches
-        .value_of("connstr")
+        .get_one::<String>("connstr")
         .expect("Postgres connection string is required");
-    let spec = matches.value_of("spec");
-    let spec_path = matches.value_of("spec-path");
+    let spec = matches.get_one::<String>("spec");
+    let spec_path = matches.get_one::<String>("spec-path");

     // Try to use just 'postgres' if no path is provided
-    let pgbin = matches.value_of("pgbin").unwrap_or("postgres");
+    let pgbin = matches.get_one::<String>("pgbin").unwrap();

     let spec: ComputeSpec = match spec {
         // First, try to get cluster spec from the cli argument
@@ -157,7 +123,7 @@ fn main() -> Result<()> {
             exit(code)
         }
         Err(error) => {
-            error!("could not start the compute node: {}", error);
+            error!("could not start the compute node: {:?}", error);

             let mut state = compute.state.write().unwrap();
             state.error = Some(format!("{:?}", error));
@@ -173,3 +139,48 @@ fn main() -> Result<()> {
         }
     }
 }
+
+fn cli() -> clap::Command {
+    // Env variable is set by `cargo`
+    let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
+    clap::Command::new("compute_ctl")
+        .version(version)
+        .arg(
+            Arg::new("connstr")
+                .short('C')
+                .long("connstr")
+                .value_name("DATABASE_URL")
+                .required(true),
+        )
+        .arg(
+            Arg::new("pgdata")
+                .short('D')
+                .long("pgdata")
+                .value_name("DATADIR")
+                .required(true),
+        )
+        .arg(
+            Arg::new("pgbin")
+                .short('b')
+                .long("pgbin")
+                .default_value("postgres")
+                .value_name("POSTGRES_PATH"),
+        )
+        .arg(
+            Arg::new("spec")
+                .short('s')
+                .long("spec")
+                .value_name("SPEC_JSON"),
+        )
+        .arg(
+            Arg::new("spec-path")
+                .short('S')
+                .long("spec-path")
+                .value_name("SPEC_PATH"),
+        )
+}
+
+#[test]
+fn verify_cli() {
+    cli().debug_assert()
+}
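The hunk above moves argument parsing from clap 3's `App`/`value_of` to clap 4's `Command`/`get_one`, and factors the builder into a `cli()` helper so it can be checked by `debug_assert()`. A minimal sketch (not from the commit) of how such a helper is exercised in a test; the connection string and paths below are placeholders:

```rust
// Sketch only: clap 4 Command parsing, mirroring the new cli() helper.
use clap::{Arg, Command};

fn cli() -> Command {
    Command::new("compute_ctl")
        .arg(Arg::new("connstr").short('C').long("connstr").required(true))
        .arg(Arg::new("pgdata").short('D').long("pgdata").required(true))
}

#[test]
fn parses_required_args() {
    let m = cli()
        .try_get_matches_from(["compute_ctl", "-C", "postgres://localhost:5432", "-D", "/tmp/pgdata"])
        .expect("arguments should parse");
    // clap 4 replaces `value_of` with the typed `get_one::<String>` accessor.
    assert_eq!(m.get_one::<String>("pgdata").unwrap(), "/tmp/pgdata");
}
```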
@@ -187,10 +187,13 @@ impl ComputeNode {
         let sync_output = sync_handle
             .wait_with_output()
             .expect("postgres --sync-safekeepers failed");

         if !sync_output.status.success() {
             anyhow::bail!(
-                "postgres --sync-safekeepers exited with non-zero status: {}",
+                "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
                 sync_output.status,
+                String::from_utf8(sync_output.stdout)
+                    .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
             );
         }

@@ -254,14 +257,7 @@ impl ComputeNode {
             .spawn()
             .expect("cannot start postgres process");

-        // Try default Postgres port if it is not provided
-        let port = self
-            .spec
-            .cluster
-            .settings
-            .find("port")
-            .unwrap_or_else(|| "5432".to_string());
-        wait_for_postgres(&mut pg, &port, pgdata_path)?;
+        wait_for_postgres(&mut pg, pgdata_path)?;

         // If connection fails,
         // it may be the old node with `zenith_admin` superuser.
@@ -295,7 +291,7 @@ impl ComputeNode {
         handle_roles(&self.spec, &mut client)?;
         handle_databases(&self.spec, &mut client)?;
         handle_role_deletions(self, &mut client)?;
-        handle_grants(&self.spec, &mut client)?;
+        handle_grants(self, &mut client)?;
         create_writablity_check_data(&mut client)?;

         // 'Close' connection
@@ -1,18 +1,18 @@
 use std::fmt::Write;
+use std::fs;
 use std::fs::File;
 use std::io::{BufRead, BufReader};
-use std::net::{SocketAddr, TcpStream};
 use std::os::unix::fs::PermissionsExt;
 use std::path::Path;
 use std::process::Child;
-use std::str::FromStr;
-use std::{fs, thread, time};
+use std::time::{Duration, Instant};

 use anyhow::{bail, Result};
+use notify::{RecursiveMode, Watcher};
 use postgres::{Client, Transaction};
 use serde::Deserialize;

-const POSTGRES_WAIT_TIMEOUT: u64 = 60 * 1000; // milliseconds
+const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds

 /// Rust representation of Postgres role info with only those fields
 /// that matter for us.
@@ -62,9 +62,16 @@ impl GenericOption {
     /// Represent `GenericOption` as configuration option.
     pub fn to_pg_setting(&self) -> String {
         if let Some(val) = &self.value {
+            let name = match self.name.as_str() {
+                "safekeepers" => "neon.safekeepers",
+                "wal_acceptor_reconnect" => "neon.safekeeper_reconnect_timeout",
+                "wal_acceptor_connection_timeout" => "neon.safekeeper_connection_timeout",
+                it => it,
+            };
+
             match self.vartype.as_ref() {
-                "string" => format!("{} = '{}'", self.name, val),
+                "string" => format!("{} = '{}'", name, val),
-                _ => format!("{} = {}", self.name, val),
+                _ => format!("{} = {}", name, val),
             }
         } else {
             self.name.to_owned()
@@ -161,7 +168,7 @@ impl Database {
     /// it may require a proper quoting too.
     pub fn to_pg_options(&self) -> String {
         let mut params: String = self.options.as_pg_options();
-        write!(params, " OWNER {}", &self.owner.quote())
+        write!(params, " OWNER {}", &self.owner.pg_quote())
             .expect("String is documented to not to error during write operations");

         params
@@ -172,18 +179,17 @@ impl Database {
 /// intended to be used for DB / role names.
 pub type PgIdent = String;

-/// Generic trait used to provide quoting for strings used in the
-/// Postgres SQL queries. Currently used only to implement quoting
-/// of identifiers, but could be used for literals in the future.
-pub trait PgQuote {
-    fn quote(&self) -> String;
+/// Generic trait used to provide quoting / encoding for strings used in the
+/// Postgres SQL queries and DATABASE_URL.
+pub trait Escaping {
+    fn pg_quote(&self) -> String;
 }

-impl PgQuote for PgIdent {
+impl Escaping for PgIdent {
     /// This is intended to mimic Postgres quote_ident(), but for simplicity it
-    /// always quotes provided string with `""` and escapes every `"`. Not idempotent,
-    /// i.e. if string is already escaped it will be escaped again.
-    fn quote(&self) -> String {
+    /// always quotes provided string with `""` and escapes every `"`.
+    /// **Not idempotent**, i.e. if string is already escaped it will be escaped again.
+    fn pg_quote(&self) -> String {
         let result = format!("\"{}\"", self.replace('"', "\"\""));
         result
     }
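The renamed `Escaping::pg_quote` behaves like Postgres `quote_ident()`: it always wraps the identifier in double quotes and doubles any embedded quote. A small standalone sketch of the same rule (the free function and the `role` value below are illustrative, not part of the diff):

```rust
// Sketch only: same escaping rule as the `Escaping for PgIdent` impl above.
fn pg_quote(ident: &str) -> String {
    format!("\"{}\"", ident.replace('"', "\"\""))
}

fn main() {
    let role = "weird\"role";
    // Prints: CREATE ROLE "weird""role"  -- safe to splice into SQL text.
    println!("CREATE ROLE {}", pg_quote(role));
    // Not idempotent: quoting an already-quoted string escapes it again.
    assert_eq!(pg_quote(&pg_quote("x")), "\"\"\"x\"\"\"");
}
```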
@@ -223,50 +229,112 @@ pub fn get_existing_dbs(client: &mut Client) -> Result<Vec<Database>> {
     Ok(postgres_dbs)
 }

-/// Wait for Postgres to become ready to accept connections:
-/// - state should be `ready` in the `pgdata/postmaster.pid`
-/// - and we should be able to connect to 127.0.0.1:5432
-pub fn wait_for_postgres(pg: &mut Child, port: &str, pgdata: &Path) -> Result<()> {
-    let pid_path = pgdata.join("postmaster.pid");
-    let mut slept: u64 = 0; // ms
-    let pause = time::Duration::from_millis(100);
-
-    let timeout = time::Duration::from_millis(10);
-    let addr = SocketAddr::from_str(&format!("127.0.0.1:{}", port)).unwrap();
-
-    loop {
-        // Sleep POSTGRES_WAIT_TIMEOUT at max (a bit longer actually if consider a TCP timeout,
-        // but postgres starts listening almost immediately, even if it is not really
-        // ready to accept connections).
-        if slept >= POSTGRES_WAIT_TIMEOUT {
-            bail!("timed out while waiting for Postgres to start");
-        }
-
-        if let Ok(Some(status)) = pg.try_wait() {
-            // Postgres exited, that is not what we expected, bail out earlier.
-            let code = status.code().unwrap_or(-1);
-            bail!("Postgres exited unexpectedly with code {}", code);
-        }
-
-        if pid_path.exists() {
-            let file = BufReader::new(File::open(&pid_path)?);
-            let status = file
-                .lines()
-                .last()
-                .unwrap()
-                .unwrap_or_else(|_| "unknown".to_string());
-            let can_connect = TcpStream::connect_timeout(&addr, timeout).is_ok();
-
-            // Now Postgres is ready to accept connections
-            if status.trim() == "ready" && can_connect {
-                break;
-            }
-        }
-
-        thread::sleep(pause);
-        slept += 100;
-    }
-
-    Ok(())
-}
+/// Wait for Postgres to become ready to accept connections. It's ready to
+/// accept connections when the state-field in `pgdata/postmaster.pid` says
+/// 'ready'.
+pub fn wait_for_postgres(pg: &mut Child, pgdata: &Path) -> Result<()> {
+    let pid_path = pgdata.join("postmaster.pid");
+
+    // PostgreSQL writes line "ready" to the postmaster.pid file, when it has
+    // completed initialization and is ready to accept connections. We want to
+    // react quickly and perform the rest of our initialization as soon as
+    // PostgreSQL starts accepting connections. Use 'notify' to be notified
+    // whenever the PID file is changed, and whenever it changes, read it to
+    // check if it's now "ready".
+    //
+    // You cannot actually watch a file before it exists, so we first watch the
+    // data directory, and once the postmaster.pid file appears, we switch to
+    // watch the file instead. We also wake up every 100 ms to poll, just in
+    // case we miss some events for some reason. Not strictly necessary, but
+    // better safe than sorry.
+    let (tx, rx) = std::sync::mpsc::channel();
+    let (mut watcher, rx): (Box<dyn Watcher>, _) = match notify::recommended_watcher(move |res| {
+        let _ = tx.send(res);
+    }) {
+        Ok(watcher) => (Box::new(watcher), rx),
+        Err(e) => {
+            match e.kind {
+                notify::ErrorKind::Io(os) if os.raw_os_error() == Some(38) => {
+                    // docker on m1 macs does not support recommended_watcher
+                    // but return "Function not implemented (os error 38)"
+                    // see https://github.com/notify-rs/notify/issues/423
+                    let (tx, rx) = std::sync::mpsc::channel();
+
+                    // let's poll it faster than what we check the results for (100ms)
+                    let config =
+                        notify::Config::default().with_poll_interval(Duration::from_millis(50));
+
+                    let watcher = notify::PollWatcher::new(
+                        move |res| {
+                            let _ = tx.send(res);
+                        },
+                        config,
+                    )?;
+
+                    (Box::new(watcher), rx)
+                }
+                _ => return Err(e.into()),
+            }
+        }
+    };
+
+    watcher.watch(pgdata, RecursiveMode::NonRecursive)?;
+
+    let started_at = Instant::now();
+    let mut postmaster_pid_seen = false;
+    loop {
+        if let Ok(Some(status)) = pg.try_wait() {
+            // Postgres exited, that is not what we expected, bail out earlier.
+            let code = status.code().unwrap_or(-1);
+            bail!("Postgres exited unexpectedly with code {}", code);
+        }
+
+        let res = rx.recv_timeout(Duration::from_millis(100));
+        log::debug!("woken up by notify: {res:?}");
+        // If there are multiple events in the channel already, we only need to be
+        // check once. Swallow the extra events before we go ahead to check the
+        // pid file.
+        while let Ok(res) = rx.try_recv() {
+            log::debug!("swallowing extra event: {res:?}");
+        }
+
+        // Check that we can open pid file first.
+        if let Ok(file) = File::open(&pid_path) {
+            if !postmaster_pid_seen {
+                log::debug!("postmaster.pid appeared");
+                watcher
+                    .unwatch(pgdata)
+                    .expect("Failed to remove pgdata dir watch");
+                watcher
+                    .watch(&pid_path, RecursiveMode::NonRecursive)
+                    .expect("Failed to add postmaster.pid file watch");
+                postmaster_pid_seen = true;
+            }

+            let file = BufReader::new(file);
+            let last_line = file.lines().last();
+
+            // Pid file could be there and we could read it, but it could be empty, for example.
+            if let Some(Ok(line)) = last_line {
+                let status = line.trim();
+                log::debug!("last line of postmaster.pid: {status:?}");
+
+                // Now Postgres is ready to accept connections
+                if status == "ready" {
+                    break;
+                }
+            }
+        }
+
+        // Give up after POSTGRES_WAIT_TIMEOUT.
+        let duration = started_at.elapsed();
+        if duration >= POSTGRES_WAIT_TIMEOUT {
+            bail!("timed out while waiting for Postgres to start");
+        }
+    }
+
+    log::info!("PostgreSQL is now running, continuing to configure it");
+
+    Ok(())
+}
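The rewritten `wait_for_postgres` waits on filesystem events instead of a fixed sleep plus TCP probe: watch the data directory until `postmaster.pid` appears, then watch the file itself and re-read its last line until it says `ready`, waking every 100 ms as a safety net. A condensed sketch of that event loop, assuming the notify 5 API; the timeout, the child-exit check and the Docker/ARM `PollWatcher` fallback from the diff are omitted:

```rust
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::sync::mpsc::channel;
use std::time::Duration;

use notify::{RecursiveMode, Watcher};

// Sketch: wait until the last line of <pgdata>/postmaster.pid becomes "ready".
fn wait_ready(pgdata: &Path) -> anyhow::Result<()> {
    let pid_path = pgdata.join("postmaster.pid");
    let (tx, rx) = channel();
    let mut watcher = notify::recommended_watcher(move |res| {
        let _ = tx.send(res);
    })?;
    // The pid file may not exist yet, so start by watching the directory.
    watcher.watch(pgdata, RecursiveMode::NonRecursive)?;

    loop {
        // Wake up on any event, or every 100 ms in case an event was missed.
        let _ = rx.recv_timeout(Duration::from_millis(100));
        if let Ok(file) = std::fs::File::open(&pid_path) {
            if let Some(Ok(line)) = BufReader::new(file).lines().last() {
                if line.trim() == "ready" {
                    return Ok(());
                }
            }
        }
    }
}
```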
@@ -1,7 +1,9 @@
 use std::path::Path;
+use std::str::FromStr;

 use anyhow::Result;
 use log::{info, log_enabled, warn, Level};
+use postgres::config::Config;
 use postgres::{Client, NoTls};
 use serde::Deserialize;

@@ -115,8 +117,8 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
             if existing_roles.iter().any(|r| r.name == op.name) {
                 let query: String = format!(
                     "ALTER ROLE {} RENAME TO {}",
-                    op.name.quote(),
-                    new_name.quote()
+                    op.name.pg_quote(),
+                    new_name.pg_quote()
                 );

                 warn!("renaming role '{}' to '{}'", op.name, new_name);
@@ -162,7 +164,7 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
             }

             if update_role {
-                let mut query: String = format!("ALTER ROLE {} ", name.quote());
+                let mut query: String = format!("ALTER ROLE {} ", name.pg_quote());
                 info_print!(" -> update");

                 query.push_str(&role.to_pg_options());
@@ -170,7 +172,7 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
             }
         } else {
             info!("role name: '{}'", &name);
-            let mut query: String = format!("CREATE ROLE {} ", name.quote());
+            let mut query: String = format!("CREATE ROLE {} ", name.pg_quote());
             info!("role create query: '{}'", &query);
             info_print!(" -> create");

@@ -179,7 +181,7 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {

             let grant_query = format!(
                 "GRANT pg_read_all_data, pg_write_all_data TO {}",
-                name.quote()
+                name.pg_quote()
             );
             xact.execute(grant_query.as_str(), &[])?;
             info!("role grant query: '{}'", &grant_query);
@@ -215,7 +217,7 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
         // We do not check either role exists or not,
         // Postgres will take care of it for us
         if op.action == "delete_role" {
-            let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.quote());
+            let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.pg_quote());

             warn!("deleting role '{}'", &op.name);
             xact.execute(query.as_str(), &[])?;
@@ -230,17 +232,16 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
 fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()> {
     for db in &node.spec.cluster.databases {
         if db.owner != *role_name {
-            let mut connstr = node.connstr.clone();
-            // database name is always the last and the only component of the path
-            connstr.set_path(&db.name);
+            let mut conf = Config::from_str(node.connstr.as_str())?;
+            conf.dbname(&db.name);

-            let mut client = Client::connect(connstr.as_str(), NoTls)?;
+            let mut client = conf.connect(NoTls)?;

             // This will reassign all dependent objects to the db owner
             let reassign_query = format!(
                 "REASSIGN OWNED BY {} TO {}",
-                role_name.quote(),
-                db.owner.quote()
+                role_name.pg_quote(),
+                db.owner.pg_quote()
             );
             info!(
                 "reassigning objects owned by '{}' in db '{}' to '{}'",
@@ -249,7 +250,7 @@ fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()>
             client.simple_query(&reassign_query)?;

             // This now will only drop privileges of the role
-            let drop_query = format!("DROP OWNED BY {}", role_name.quote());
+            let drop_query = format!("DROP OWNED BY {}", role_name.pg_quote());
             client.simple_query(&drop_query)?;
         }
     }
@@ -279,7 +280,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
             // We do not check either DB exists or not,
             // Postgres will take care of it for us
             "delete_db" => {
-                let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.quote());
+                let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.pg_quote());

                 warn!("deleting database '{}'", &op.name);
                 client.execute(query.as_str(), &[])?;
@@ -291,8 +292,8 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
                 if existing_dbs.iter().any(|r| r.name == op.name) {
                     let query: String = format!(
                         "ALTER DATABASE {} RENAME TO {}",
-                        op.name.quote(),
-                        new_name.quote()
+                        op.name.pg_quote(),
+                        new_name.pg_quote()
                     );

                     warn!("renaming database '{}' to '{}'", op.name, new_name);
@@ -320,7 +321,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
             // XXX: db owner name is returned as quoted string from Postgres,
             // when quoting is needed.
             let new_owner = if r.owner.starts_with('"') {
-                db.owner.quote()
+                db.owner.pg_quote()
             } else {
                 db.owner.clone()
             };
@@ -328,15 +329,15 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
             if new_owner != r.owner {
                 let query: String = format!(
                     "ALTER DATABASE {} OWNER TO {}",
-                    name.quote(),
-                    db.owner.quote()
+                    name.pg_quote(),
+                    db.owner.pg_quote()
                 );
                 info_print!(" -> update");

                 client.execute(query.as_str(), &[])?;
             }
         } else {
-            let mut query: String = format!("CREATE DATABASE {} ", name.quote());
+            let mut query: String = format!("CREATE DATABASE {} ", name.pg_quote());
             info_print!(" -> create");

             query.push_str(&db.to_pg_options());
@@ -349,9 +350,11 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
     Ok(())
 }

-// Grant CREATE ON DATABASE to the database owner
-// to allow clients create trusted extensions.
-pub fn handle_grants(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
+/// Grant CREATE ON DATABASE to the database owner and do some other alters and grants
+/// to allow users creating trusted extensions and re-creating `public` schema, for example.
+pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
+    let spec = &node.spec;
+
     info!("cluster spec grants:");

     // We now have a separate `web_access` role to connect to the database
@@ -364,7 +367,7 @@ pub fn handle_grants(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
         .cluster
         .roles
         .iter()
-        .map(|r| r.name.quote())
+        .map(|r| r.name.pg_quote())
         .collect::<Vec<_>>();

     for db in &spec.cluster.databases {
@@ -372,7 +375,7 @@ pub fn handle_grants(spec: &ComputeSpec, client: &mut Client) -> Result<()> {

         let query: String = format!(
             "GRANT CREATE ON DATABASE {} TO {}",
-            dbname.quote(),
+            dbname.pg_quote(),
             roles.join(", ")
         );
         info!("grant query {}", &query);
@@ -380,5 +383,73 @@ pub fn handle_grants(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
         client.execute(query.as_str(), &[])?;
     }

+    // Do some per-database access adjustments. We'd better do this at db creation time,
+    // but CREATE DATABASE isn't transactional. So we cannot create db + do some grants
+    // atomically.
+    for db in &node.spec.cluster.databases {
+        let mut conf = Config::from_str(node.connstr.as_str())?;
+        conf.dbname(&db.name);
+
+        let mut db_client = conf.connect(NoTls)?;
+
+        // This will only change ownership on the schema itself, not the objects
+        // inside it. Without it owner of the `public` schema will be `cloud_admin`
+        // and database owner cannot do anything with it. SQL procedure ensures
+        // that it won't error out if schema `public` doesn't exist.
+        let alter_query = format!(
+            "DO $$\n\
+                DECLARE\n\
+                    schema_owner TEXT;\n\
+                BEGIN\n\
+                    IF EXISTS(\n\
+                        SELECT nspname\n\
+                        FROM pg_catalog.pg_namespace\n\
+                        WHERE nspname = 'public'\n\
+                    )\n\
+                    THEN\n\
+                        SELECT nspowner::regrole::text\n\
+                            FROM pg_catalog.pg_namespace\n\
+                            WHERE nspname = 'public'\n\
+                            INTO schema_owner;\n\
+            \n\
+                        IF schema_owner = 'cloud_admin' OR schema_owner = 'zenith_admin'\n\
+                        THEN\n\
+                            ALTER SCHEMA public OWNER TO {};\n\
+                        END IF;\n\
+                    END IF;\n\
+                END\n\
+            $$;",
+            db.owner.pg_quote()
+        );
+        db_client.simple_query(&alter_query)?;
+
+        // Explicitly grant CREATE ON SCHEMA PUBLIC to the web_access user.
+        // This is needed because since postgres 15 this privilege is removed by default.
+        let grant_query = "DO $$\n\
+            BEGIN\n\
+                IF EXISTS(\n\
+                    SELECT nspname\n\
+                    FROM pg_catalog.pg_namespace\n\
+                    WHERE nspname = 'public'\n\
+                ) AND\n\
+                current_setting('server_version_num')::int/10000 >= 15\n\
+                THEN\n\
+                    IF EXISTS(\n\
+                        SELECT rolname\n\
+                        FROM pg_catalog.pg_roles\n\
+                        WHERE rolname = 'web_access'\n\
+                    )\n\
+                    THEN\n\
+                        GRANT CREATE ON SCHEMA public TO web_access;\n\
+                    END IF;\n\
+                END IF;\n\
+            END\n\
+        $$;"
+            .to_string();
+
+        info!("grant query for db {} : {}", &db.name, &grant_query);
+        db_client.simple_query(&grant_query)?;
+    }
+
     Ok(())
 }
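Both `reassign_owned_objects` and the new per-database loop in `handle_grants` reuse the node's connection string but swap in the target database through `postgres::config::Config` instead of editing the URL path. A hedged sketch of that pattern (the connection string and database name are placeholders):

```rust
use std::str::FromStr;

use postgres::config::Config;
use postgres::NoTls;

// Sketch: connect to a specific database, reusing an existing admin connstring.
fn connect_to_db(admin_connstr: &str, dbname: &str) -> anyhow::Result<postgres::Client> {
    let mut conf = Config::from_str(admin_connstr)?;
    // Override only the database; host, port, user and password stay as parsed.
    conf.dbname(dbname);
    Ok(conf.connect(NoTls)?)
}

fn example() -> anyhow::Result<()> {
    let mut client = connect_to_db("postgresql://cloud_admin@127.0.0.1:55432/postgres", "neondb")?;
    // e.g. run the schema-ownership DO block shown in the hunk above.
    client.simple_query("SELECT 1")?;
    Ok(())
}
```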
@@ -85,7 +85,7 @@
       "vartype": "bool"
     },
     {
-      "name": "safekeepers",
+      "name": "neon.safekeepers",
       "value": "127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501",
       "vartype": "string"
     },
@@ -181,7 +181,6 @@
       }
     ]
   },
-
   "delta_operations": [
     {
       "action": "delete_db",
@@ -28,14 +28,14 @@ mod pg_helpers_tests {

         assert_eq!(
             spec.cluster.settings.as_pg_settings(),
-            "fsync = off\nwal_level = replica\nhot_standby = on\nsafekeepers = '127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501'\nwal_log_hints = on\nlog_connections = on\nshared_buffers = 32768\nport = 55432\nmax_connections = 100\nmax_wal_senders = 10\nlisten_addresses = '0.0.0.0'\nwal_sender_timeout = 0\npassword_encryption = md5\nmaintenance_work_mem = 65536\nmax_parallel_workers = 8\nmax_worker_processes = 8\nneon.tenant_id = 'b0554b632bd4d547a63b86c3630317e8'\nmax_replication_slots = 10\nneon.timeline_id = '2414a61ffc94e428f14b5758fe308e13'\nshared_preload_libraries = 'neon'\nsynchronous_standby_names = 'walproposer'\nneon.pageserver_connstring = 'host=127.0.0.1 port=6400'"
+            "fsync = off\nwal_level = replica\nhot_standby = on\nneon.safekeepers = '127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501'\nwal_log_hints = on\nlog_connections = on\nshared_buffers = 32768\nport = 55432\nmax_connections = 100\nmax_wal_senders = 10\nlisten_addresses = '0.0.0.0'\nwal_sender_timeout = 0\npassword_encryption = md5\nmaintenance_work_mem = 65536\nmax_parallel_workers = 8\nmax_worker_processes = 8\nneon.tenant_id = 'b0554b632bd4d547a63b86c3630317e8'\nmax_replication_slots = 10\nneon.timeline_id = '2414a61ffc94e428f14b5758fe308e13'\nshared_preload_libraries = 'neon'\nsynchronous_standby_names = 'walproposer'\nneon.pageserver_connstring = 'host=127.0.0.1 port=6400'"
         );
     }

     #[test]
-    fn quote_ident() {
+    fn ident_pg_quote() {
         let ident: PgIdent = PgIdent::from("\"name\";\\n select 1;");

-        assert_eq!(ident.quote(), "\"\"\"name\"\";\\n select 1;\"");
+        assert_eq!(ident.pg_quote(), "\"\"\"name\"\";\\n select 1;\"");
     }
 }
@@ -4,20 +4,25 @@ version = "0.1.0"
 edition = "2021"

 [dependencies]
-tar = "0.4.38"
-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
-serde = { version = "1.0", features = ["derive"] }
-serde_with = "1.12.0"
-toml = "0.5"
-lazy_static = "1.4"
-regex = "1"
 anyhow = "1.0"
-thiserror = "1"
-nix = "0.23"
-url = "2.2.2"
+clap = "4.0"
+comfy-table = "6.1"
+git-version = "0.3.5"
+nix = "0.25"
+once_cell = "1.13.0"
+postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev = "d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
+regex = "1"
 reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] }
+serde = { version = "1.0", features = ["derive"] }
+serde_with = "2.0"
+tar = "0.4.38"
+thiserror = "1"
+toml = "0.5"
+url = "2.2.2"

-pageserver = { path = "../pageserver" }
-safekeeper = { path = "../safekeeper" }
+# Note: Do not directly depend on pageserver or safekeeper; use pageserver_api or safekeeper_api
+# instead, so that recompile times are better.
+pageserver_api = { path = "../libs/pageserver_api" }
+safekeeper_api = { path = "../libs/safekeeper_api" }
 utils = { path = "../libs/utils" }
 workspace_hack = { version = "0.1", path = "../workspace_hack" }
@@ -1,4 +1,4 @@
-# Minimal zenith environment with one safekeeper. This is equivalent to the built-in
+# Minimal neon environment with one safekeeper. This is equivalent to the built-in
 # defaults that you get with no --config
 [pageserver]
 listen_pg_addr = '127.0.0.1:64000'
control_plane/src/background_process.rs (new file)
@@ -0,0 +1,279 @@
//! Spawns and kills background processes that are needed by Neon CLI.
//! Applies common set-up such as log and pid files (if needed) to every process.
//!
//! Neon CLI does not run in background, so it needs to store the information about
//! spawned processes, which it does in this module.
//! We do that by storing the pid of the process in the "${process_name}.pid" file.
//! The pid file can be created by the process itself
//! (Neon storage binaries do that and also ensure that a lock is taken onto that file)
//! or we create such file after starting the process
//! (non-Neon binaries don't necessarily follow our pidfile conventions).
//! The pid stored in the file is later used to stop the service.
//!
//! See [`lock_file`] module for more info.

use std::ffi::OsStr;
use std::io::Write;
use std::path::Path;
use std::process::{Child, Command};
use std::time::Duration;
use std::{fs, io, thread};

use anyhow::{anyhow, bail, Context, Result};
use nix::errno::Errno;
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;

use utils::lock_file;

// These constants control the loop used to poll for process start / stop.
//
// The loop waits for at most 10 seconds, polling every 100 ms.
// Once a second, it prints a dot ("."), to give the user an indication that
// it's waiting. If the process hasn't started/stopped after 5 seconds,
// it prints a notice that it's taking long, but keeps waiting.
//
const RETRY_UNTIL_SECS: u64 = 10;
const RETRIES: u64 = (RETRY_UNTIL_SECS * 1000) / RETRY_INTERVAL_MILLIS;
const RETRY_INTERVAL_MILLIS: u64 = 100;
const DOT_EVERY_RETRIES: u64 = 10;
const NOTICE_AFTER_RETRIES: u64 = 50;

/// Argument to `start_process`, to indicate whether it should create pidfile or if the process creates
/// it itself.
pub enum InitialPidFile<'t> {
    /// Create a pidfile, to allow future CLI invocations to manipulate the process.
    Create(&'t Path),
    /// The process will create the pidfile itself, need to wait for that event.
    Expect(&'t Path),
}

/// Start a background child process using the parameters given.
pub fn start_process<F, S: AsRef<OsStr>>(
    process_name: &str,
    datadir: &Path,
    command: &Path,
    args: &[S],
    initial_pid_file: InitialPidFile,
    process_status_check: F,
) -> anyhow::Result<Child>
where
    F: Fn() -> anyhow::Result<bool>,
{
    let log_path = datadir.join(format!("{process_name}.log"));
    let process_log_file = fs::OpenOptions::new()
        .create(true)
        .write(true)
        .append(true)
        .open(&log_path)
        .with_context(|| {
            format!("Could not open {process_name} log file {log_path:?} for writing")
        })?;
    let same_file_for_stderr = process_log_file.try_clone().with_context(|| {
        format!("Could not reuse {process_name} log file {log_path:?} for writing stderr")
    })?;

    let mut command = Command::new(command);
    let background_command = command
        .stdout(process_log_file)
        .stderr(same_file_for_stderr)
        .args(args);
    let filled_cmd = fill_aws_secrets_vars(fill_rust_env_vars(background_command));

    let mut spawned_process = filled_cmd.spawn().with_context(|| {
        format!("Could not spawn {process_name}, see console output and log files for details.")
    })?;
    let pid = spawned_process.id();
    let pid = Pid::from_raw(
        i32::try_from(pid)
            .with_context(|| format!("Subprocess {process_name} has invalid pid {pid}"))?,
    );

    let pid_file_to_check = match initial_pid_file {
        InitialPidFile::Create(target_pid_file_path) => {
            match lock_file::create_lock_file(target_pid_file_path, pid.to_string()) {
                lock_file::LockCreationResult::Created { .. } => {
                    // We use "lock" file here only to create the pid file. The lock on the pidfile will be dropped as soon
                    // as this CLI invocation exits, so it's a bit useless, but doesn't any harm either.
                }
                lock_file::LockCreationResult::AlreadyLocked { .. } => {
                    anyhow::bail!("Cannot write pid file for {process_name} at path {target_pid_file_path:?}: file is already locked by another process")
                }
                lock_file::LockCreationResult::CreationFailed(e) => {
                    return Err(e.context(format!(
                        "Failed to create pid file for {process_name} at path {target_pid_file_path:?}"
                    )))
                }
            }
            None
        }
        InitialPidFile::Expect(pid_file_path) => Some(pid_file_path),
    };

    for retries in 0..RETRIES {
        match process_started(pid, pid_file_to_check, &process_status_check) {
            Ok(true) => {
                println!("\n{process_name} started, pid: {pid}");
                return Ok(spawned_process);
            }
            Ok(false) => {
                if retries == NOTICE_AFTER_RETRIES {
                    // The process is taking a long time to start up. Keep waiting, but
                    // print a message
                    print!("\n{process_name} has not started yet, continuing to wait");
                }
                if retries % DOT_EVERY_RETRIES == 0 {
                    print!(".");
                    io::stdout().flush().unwrap();
                }
                thread::sleep(Duration::from_millis(RETRY_INTERVAL_MILLIS));
            }
            Err(e) => {
                println!("{process_name} failed to start: {e:#}");
                if let Err(e) = spawned_process.kill() {
                    println!("Could not stop {process_name} subprocess: {e:#}")
                };
                return Err(e);
            }
        }
    }
    println!();
    anyhow::bail!("{process_name} did not start in {RETRY_UNTIL_SECS} seconds");
}

/// Stops the process, using the pid file given. Returns Ok also if the process is already not running.
pub fn stop_process(immediate: bool, process_name: &str, pid_file: &Path) -> anyhow::Result<()> {
    if !pid_file.exists() {
        println!("{process_name} is already stopped: no pid file {pid_file:?} is present");
        return Ok(());
    }
    let pid = read_pidfile(pid_file)?;

    let sig = if immediate {
        print!("Stopping {process_name} with pid {pid} immediately..");
        Signal::SIGQUIT
    } else {
        print!("Stopping {process_name} with pid {pid} gracefully..");
        Signal::SIGTERM
    };
    io::stdout().flush().unwrap();
    match kill(pid, sig) {
        Ok(()) => (),
        Err(Errno::ESRCH) => {
            println!(
                "{process_name} with pid {pid} does not exist, but a pid file {pid_file:?} was found"
            );
            return Ok(());
        }
        Err(e) => anyhow::bail!("Failed to send signal to {process_name} with pid {pid}: {e}"),
    }

    // Wait until process is gone
    for retries in 0..RETRIES {
        match process_has_stopped(pid) {
            Ok(true) => {
                println!("\n{process_name} stopped");
                if let Err(e) = fs::remove_file(pid_file) {
                    if e.kind() != io::ErrorKind::NotFound {
                        eprintln!("Failed to remove pid file {pid_file:?} after stopping the process: {e:#}");
                    }
                }
                return Ok(());
            }
            Ok(false) => {
                if retries == NOTICE_AFTER_RETRIES {
                    // The process is taking a long time to start up. Keep waiting, but
                    // print a message
                    print!("\n{process_name} has not stopped yet, continuing to wait");
                }
                if retries % DOT_EVERY_RETRIES == 0 {
                    print!(".");
                    io::stdout().flush().unwrap();
                }
                thread::sleep(Duration::from_millis(RETRY_INTERVAL_MILLIS));
            }
            Err(e) => {
                println!("{process_name} with pid {pid} failed to stop: {e:#}");
                return Err(e);
            }
        }
    }
    println!();
    anyhow::bail!("{process_name} with pid {pid} did not stop in {RETRY_UNTIL_SECS} seconds");
}

fn fill_rust_env_vars(cmd: &mut Command) -> &mut Command {
    let mut filled_cmd = cmd.env_clear().env("RUST_BACKTRACE", "1");

    // Pass through these environment variables to the command
    for var in ["LLVM_PROFILE_FILE", "FAILPOINTS", "RUST_LOG"] {
        if let Some(val) = std::env::var_os(var) {
            filled_cmd = filled_cmd.env(var, val);
        }
    }

    filled_cmd
}

fn fill_aws_secrets_vars(mut cmd: &mut Command) -> &mut Command {
    for env_key in [
        "AWS_ACCESS_KEY_ID",
        "AWS_SECRET_ACCESS_KEY",
        "AWS_SESSION_TOKEN",
    ] {
        if let Ok(value) = std::env::var(env_key) {
            cmd = cmd.env(env_key, value);
        }
    }
    cmd
}

fn process_started<F>(
    pid: Pid,
    pid_file_to_check: Option<&Path>,
    status_check: &F,
) -> anyhow::Result<bool>
where
    F: Fn() -> anyhow::Result<bool>,
{
    match status_check() {
        Ok(true) => match pid_file_to_check {
            Some(pid_file_path) => {
                if pid_file_path.exists() {
                    let pid_in_file = read_pidfile(pid_file_path)?;
                    Ok(pid_in_file == pid)
                } else {
                    Ok(false)
                }
            }
            None => Ok(true),
        },
        Ok(false) => Ok(false),
        Err(e) => anyhow::bail!("process failed to start: {e}"),
    }
}

/// Read a PID file
///
/// We expect a file that contains a single integer.
fn read_pidfile(pidfile: &Path) -> Result<Pid> {
    let pid_str = fs::read_to_string(pidfile)
        .with_context(|| format!("failed to read pidfile {pidfile:?}"))?;
    let pid: i32 = pid_str
        .parse()
        .map_err(|_| anyhow!("failed to parse pidfile {pidfile:?}"))?;
    if pid < 1 {
        bail!("pidfile {pidfile:?} contained bad value '{pid}'");
    }
    Ok(Pid::from_raw(pid))
}

fn process_has_stopped(pid: Pid) -> anyhow::Result<bool> {
    match kill(pid, None) {
        // Process exists, keep waiting
        Ok(_) => Ok(false),
        // Process not found, we're done
        Err(Errno::ESRCH) => Ok(true),
        Err(err) => anyhow::bail!("Failed to send signal to process with pid {pid}: {err}"),
    }
}
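A hedged sketch of how a caller might drive this module to launch and later stop a daemon. The module path, binary name, data directory and readiness check below are placeholders for illustration, not taken from the Neon CLI:

```rust
use std::path::Path;

use control_plane::background_process::{self, InitialPidFile};

fn example() -> anyhow::Result<()> {
    let datadir = Path::new("/tmp/neon-example");
    let pid_file = datadir.join("my_daemon.pid");

    // Spawn the process; start_process redirects stdout/stderr to
    // <datadir>/my_daemon.log and polls the status check until it returns true.
    let _child = background_process::start_process(
        "my_daemon",
        datadir,
        Path::new("/usr/local/bin/my_daemon"),
        &["--port", "9898"],
        InitialPidFile::Create(&pid_file),
        || Ok(true), // placeholder readiness probe, e.g. an HTTP status call
    )?;

    // Later: send SIGTERM (or SIGQUIT with `immediate = true`) and wait for exit.
    background_process::stop_process(false, "my_daemon", &pid_file)?;
    Ok(())
}
```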
(One file's diff is suppressed because it is too large.)
@@ -12,15 +12,14 @@ use std::time::Duration;

 use anyhow::{Context, Result};
 use utils::{
-    connstring::connection_host_port,
+    id::{TenantId, TimelineId},
     lsn::Lsn,
     postgres_backend::AuthType,
-    zid::{ZTenantId, ZTimelineId},
 };

-use crate::local_env::LocalEnv;
+use crate::local_env::{LocalEnv, DEFAULT_PG_VERSION};
+use crate::pageserver::PageServerNode;
 use crate::postgresql_conf::PostgresConf;
-use crate::storage::PageServerNode;

 //
 // ComputeControlPlane
@@ -28,7 +27,7 @@ use crate::storage::PageServerNode;
 pub struct ComputeControlPlane {
     base_port: u16,
     pageserver: Arc<PageServerNode>,
-    pub nodes: BTreeMap<(ZTenantId, String), Arc<PostgresNode>>,
+    pub nodes: BTreeMap<(TenantId, String), Arc<PostgresNode>>,
     env: LocalEnv,
 }

@@ -76,11 +75,12 @@ impl ComputeControlPlane {

     pub fn new_node(
         &mut self,
-        tenant_id: ZTenantId,
+        tenant_id: TenantId,
         name: &str,
-        timeline_id: ZTimelineId,
+        timeline_id: TimelineId,
         lsn: Option<Lsn>,
         port: Option<u16>,
+        pg_version: u32,
     ) -> Result<Arc<PostgresNode>> {
         let port = port.unwrap_or_else(|| self.get_port());
         let node = Arc::new(PostgresNode {
@@ -93,6 +93,7 @@ impl ComputeControlPlane {
             lsn,
             tenant_id,
             uses_wal_proposer: false,
+            pg_version,
         });

         node.create_pgdata()?;
@@ -114,10 +115,11 @@ pub struct PostgresNode {
     pub env: LocalEnv,
     pageserver: Arc<PageServerNode>,
     is_test: bool,
-    pub timeline_id: ZTimelineId,
+    pub timeline_id: TimelineId,
     pub lsn: Option<Lsn>, // if it's a read-only node. None for primary
-    pub tenant_id: ZTenantId,
+    pub tenant_id: TenantId,
     uses_wal_proposer: bool,
+    pg_version: u32,
 }

 impl PostgresNode {
@@ -148,9 +150,17 @@ impl PostgresNode {
         // Read a few options from the config file
         let context = format!("in config file {}", cfg_path_str);
         let port: u16 = conf.parse_field("port", &context)?;
-        let timeline_id: ZTimelineId = conf.parse_field("neon.timeline_id", &context)?;
-        let tenant_id: ZTenantId = conf.parse_field("neon.tenant_id", &context)?;
-        let uses_wal_proposer = conf.get("safekeepers").is_some();
+        let timeline_id: TimelineId = conf.parse_field("neon.timeline_id", &context)?;
+        let tenant_id: TenantId = conf.parse_field("neon.tenant_id", &context)?;
+        let uses_wal_proposer = conf.get("neon.safekeepers").is_some();
+
+        // Read postgres version from PG_VERSION file to determine which postgres version binary to use.
+        // If it doesn't exist, assume broken data directory and use default pg version.
+        let pg_version_path = entry.path().join("PG_VERSION");
+
+        let pg_version_str =
+            fs::read_to_string(pg_version_path).unwrap_or_else(|_| DEFAULT_PG_VERSION.to_string());
+        let pg_version = u32::from_str(&pg_version_str)?;

         // parse recovery_target_lsn, if any
         let recovery_target_lsn: Option<Lsn> =
@@ -167,17 +177,24 @@ impl PostgresNode {
             lsn: recovery_target_lsn,
             tenant_id,
             uses_wal_proposer,
+            pg_version,
         })
     }

-    fn sync_safekeepers(&self, auth_token: &Option<String>) -> Result<Lsn> {
-        let pg_path = self.env.pg_bin_dir().join("postgres");
+    fn sync_safekeepers(&self, auth_token: &Option<String>, pg_version: u32) -> Result<Lsn> {
+        let pg_path = self.env.pg_bin_dir(pg_version)?.join("postgres");
         let mut cmd = Command::new(&pg_path);

         cmd.arg("--sync-safekeepers")
             .env_clear()
-            .env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
-            .env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
+            .env(
+                "LD_LIBRARY_PATH",
+                self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
+            )
+            .env(
+                "DYLD_LIBRARY_PATH",
+                self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
+            )
             .env("PGDATA", self.pgdata().to_str().unwrap())
             .stdout(Stdio::piped())
             // Comment this to avoid capturing stderr (useful if command hangs)
@@ -259,14 +276,12 @@ impl PostgresNode {
         })
     }

-    // Connect to a page server, get base backup, and untar it to initialize a
-    // new data directory
+    // Write postgresql.conf with default configuration
+    // and PG_VERSION file to the data directory of a new node.
     fn setup_pg_conf(&self, auth_type: AuthType) -> Result<()> {
         let mut conf = PostgresConf::new();
         conf.append("max_wal_senders", "10");
-        // wal_log_hints is mandatory when running against pageserver (see gh issue#192)
-        // TODO: is it possible to check wal_log_hints at pageserver side via XLOG_PARAMETER_CHANGE?
-        conf.append("wal_log_hints", "on");
+        conf.append("wal_log_hints", "off");
         conf.append("max_replication_slots", "10");
         conf.append("hot_standby", "on");
         conf.append("shared_buffers", "1MB");
@@ -284,7 +299,8 @@ impl PostgresNode {

         // Configure the node to fetch pages from pageserver
         let pageserver_connstr = {
-            let (host, port) = connection_host_port(&self.pageserver.pg_connection_config);
+            let config = &self.pageserver.pg_connection_config;
+            let (host, port) = (config.host(), config.port());

             // Set up authentication
             //
@@ -292,7 +308,7 @@ impl PostgresNode {
             // variable during compute pg startup. It is done this way because
             // otherwise user will be able to retrieve the value using SHOW
             // command or pg_settings
-            let password = if let AuthType::ZenithJWT = auth_type {
+            let password = if let AuthType::NeonJWT = auth_type {
                 "$ZENITH_AUTH_TOKEN"
             } else {
                 ""
@@ -301,7 +317,7 @@ impl PostgresNode {
             // Also note that not all parameters are supported here. Because in compute we substitute $ZENITH_AUTH_TOKEN
             // We parse this string and build it back with token from env var, and for simplicity rebuild
             // uses only needed variables namely host, port, user, password.
-            format!("postgresql://no_user:{}@{}:{}", password, host, port)
+            format!("postgresql://no_user:{password}@{host}:{port}")
         };
         conf.append("shared_preload_libraries", "neon");
         conf.append_line("");
@@ -327,7 +343,7 @@ impl PostgresNode {
         // To be able to restore database in case of pageserver node crash, safekeeper should not
         // remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers
         // (if they are not able to upload WAL to S3).
-        conf.append("max_replication_write_lag", "500MB");
+        conf.append("max_replication_write_lag", "15MB");
         conf.append("max_replication_flush_lag", "10GB");

         if !self.env.safekeepers.is_empty() {
@@ -341,7 +357,7 @@ impl PostgresNode {
                 .map(|sk| format!("localhost:{}", sk.pg_port))
                 .collect::<Vec<String>>()
                 .join(",");
-            conf.append("safekeepers", &safekeepers);
+            conf.append("neon.safekeepers", &safekeepers);
         } else {
             // We only use setup without safekeepers for tests,
             // and don't care about data durability on pageserver,
@@ -357,6 +373,9 @@ impl PostgresNode {
         let mut file = File::create(self.pgdata().join("postgresql.conf"))?;
         file.write_all(conf.to_string().as_bytes())?;

+        let mut file = File::create(self.pgdata().join("PG_VERSION"))?;
+        file.write_all(self.pg_version.to_string().as_bytes())?;
+
         Ok(())
     }

@@ -368,7 +387,7 @@ impl PostgresNode {
         // latest data from the pageserver. That is a bit clumsy but whole bootstrap
         // procedure evolves quite actively right now, so let's think about it again
         // when things would be more stable (TODO).
        let lsn = self.sync_safekeepers(auth_token)?;
-        let lsn = self.sync_safekeepers(auth_token)?;
+        let lsn = self.sync_safekeepers(auth_token, self.pg_version)?;
         if lsn == Lsn(0) {
             None
         } else {
@@ -401,7 +420,7 @@ impl PostgresNode {
     }

     fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
-        let pg_ctl_path = self.env.pg_bin_dir().join("pg_ctl");
+        let pg_ctl_path = self.env.pg_bin_dir(self.pg_version)?.join("pg_ctl");
         let mut cmd = Command::new(pg_ctl_path);
         cmd.args(
             [
@@ -417,8 +436,14 @@ impl PostgresNode {
             .concat(),
         )
         .env_clear()
-        .env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
-        .env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap());
+        .env(
+            "LD_LIBRARY_PATH",
+            self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
+        )
+        .env(
+            "DYLD_LIBRARY_PATH",
+            self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
+        );
         if let Some(token) = auth_token {
             cmd.env("ZENITH_AUTH_TOKEN", token);
         }
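The control plane now records each node's Postgres major version in a `PG_VERSION` file inside the data directory and uses it to select version-specific `pg_bin_dir`/`pg_lib_dir` paths. A small sketch of the read-with-fallback step; the default value of 14 here is an assumption for illustration, not taken from `local_env`:

```rust
use std::path::Path;
use std::str::FromStr;

// Sketch: read the node's Postgres major version, falling back to a default
// when the PG_VERSION file is missing (e.g. a pre-existing data directory).
fn node_pg_version(datadir: &Path, default_version: u32) -> anyhow::Result<u32> {
    let s = std::fs::read_to_string(datadir.join("PG_VERSION"))
        .unwrap_or_else(|_| default_version.to_string());
    Ok(u32::from_str(s.trim())?)
}

fn example() -> anyhow::Result<()> {
    let v = node_pg_version(Path::new("/tmp/pgdata"), 14)?;
    println!("using postgres v{v} binaries");
    Ok(())
}
```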
57
control_plane/src/connection.rs
Normal file
57
control_plane/src/connection.rs
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
use url::Url;
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct PgConnectionConfig {
|
||||||
|
url: Url,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PgConnectionConfig {
|
||||||
|
pub fn host(&self) -> &str {
|
||||||
|
self.url.host_str().expect("BUG: no host")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn port(&self) -> u16 {
|
||||||
|
self.url.port().expect("BUG: no port")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return a `<host>:<port>` string.
|
||||||
|
pub fn raw_address(&self) -> String {
|
||||||
|
format!("{}:{}", self.host(), self.port())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Connect using postgres protocol with TLS disabled.
|
||||||
|
pub fn connect_no_tls(&self) -> Result<postgres::Client, postgres::Error> {
|
||||||
|
postgres::Client::connect(self.url.as_str(), postgres::NoTls)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::str::FromStr for PgConnectionConfig {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
let mut url: Url = s.parse()?;
|
||||||
|
|
||||||
|
match url.scheme() {
|
||||||
|
"postgres" | "postgresql" => {}
|
||||||
|
other => anyhow::bail!("invalid scheme: {other}"),
|
||||||
|
}
|
||||||
|
|
||||||
|
// It's not a valid connection url if host is unavailable.
|
||||||
|
if url.host().is_none() {
|
||||||
|
anyhow::bail!(url::ParseError::EmptyHost);
|
||||||
|
}
|
||||||
|
|
||||||
|
// E.g. `postgres:bar`.
|
||||||
|
if url.cannot_be_a_base() {
|
||||||
|
anyhow::bail!("URL cannot be a base");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the default PG port if it's missing.
|
||||||
|
if url.port().is_none() {
|
||||||
|
url.set_port(Some(5432))
|
||||||
|
.expect("BUG: couldn't set the default port");
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Self { url })
|
||||||
|
}
|
||||||
|
}
|
||||||
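A brief usage sketch of the new PgConnectionConfig type, assuming only the API shown in the file above; the connection string below is a made-up example, not something this change configures.

use std::str::FromStr;

fn example() -> anyhow::Result<()> {
    // Hypothetical connection string; a missing port would default to 5432.
    let config = PgConnectionConfig::from_str("postgresql://no_user@127.0.0.1:64000/no_db")?;
    assert_eq!(config.host(), "127.0.0.1");
    assert_eq!(config.port(), 64000);
    assert_eq!(config.raw_address(), "127.0.0.1:64000");
    Ok(())
}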
@@ -1,95 +1,75 @@
-use std::{
-    fs,
-    path::PathBuf,
-    process::{Command, Stdio},
-};
+use std::{fs, path::PathBuf};
 
 use anyhow::Context;
-use nix::{
-    sys::signal::{kill, Signal},
-    unistd::Pid,
-};
 
-use crate::{local_env, read_pidfile};
+use crate::{background_process, local_env};
 
 pub fn start_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
     let etcd_broker = &env.etcd_broker;
-    println!(
-        "Starting etcd broker using {}",
-        etcd_broker.etcd_binary_path.display()
+    print!(
+        "Starting etcd broker using {:?}",
+        etcd_broker.etcd_binary_path
     );
 
     let etcd_data_dir = env.base_data_dir.join("etcd");
-    fs::create_dir_all(&etcd_data_dir).with_context(|| {
-        format!(
-            "Failed to create etcd data dir: {}",
-            etcd_data_dir.display()
-        )
-    })?;
-
-    let etcd_stdout_file =
-        fs::File::create(etcd_data_dir.join("etcd.stdout.log")).with_context(|| {
-            format!(
-                "Failed to create ectd stout file in directory {}",
-                etcd_data_dir.display()
-            )
-        })?;
-    let etcd_stderr_file =
-        fs::File::create(etcd_data_dir.join("etcd.stderr.log")).with_context(|| {
-            format!(
-                "Failed to create ectd stderr file in directory {}",
-                etcd_data_dir.display()
-            )
-        })?;
+    fs::create_dir_all(&etcd_data_dir)
+        .with_context(|| format!("Failed to create etcd data dir {etcd_data_dir:?}"))?;
     let client_urls = etcd_broker.comma_separated_endpoints();
+    let args = [
+        format!("--data-dir={}", etcd_data_dir.display()),
+        format!("--listen-client-urls={client_urls}"),
+        format!("--advertise-client-urls={client_urls}"),
+        // Set --quota-backend-bytes to keep the etcd virtual memory
+        // size smaller. Our test etcd clusters are very small.
+        // See https://github.com/etcd-io/etcd/issues/7910
+        "--quota-backend-bytes=100000000".to_string(),
+        // etcd doesn't compact (vacuum) with default settings,
+        // enable it to prevent space exhaustion.
+        "--auto-compaction-mode=revision".to_string(),
+        "--auto-compaction-retention=1".to_string(),
+    ];
 
-    let etcd_process = Command::new(&etcd_broker.etcd_binary_path)
-        .args(&[
-            format!("--data-dir={}", etcd_data_dir.display()),
-            format!("--listen-client-urls={client_urls}"),
-            format!("--advertise-client-urls={client_urls}"),
-            // Set --quota-backend-bytes to keep the etcd virtual memory
-            // size smaller. Our test etcd clusters are very small.
-            // See https://github.com/etcd-io/etcd/issues/7910
-            "--quota-backend-bytes=100000000".to_string(),
-        ])
-        .stdout(Stdio::from(etcd_stdout_file))
-        .stderr(Stdio::from(etcd_stderr_file))
-        .spawn()
-        .context("Failed to spawn etcd subprocess")?;
-    let pid = etcd_process.id();
+    let pid_file_path = etcd_pid_file_path(env);
+    let client = reqwest::blocking::Client::new();
 
-    let etcd_pid_file_path = etcd_pid_file_path(env);
-    fs::write(&etcd_pid_file_path, pid.to_string()).with_context(|| {
-        format!(
-            "Failed to create etcd pid file at {}",
-            etcd_pid_file_path.display()
-        )
-    })?;
+    background_process::start_process(
+        "etcd",
+        &etcd_data_dir,
+        &etcd_broker.etcd_binary_path,
+        &args,
+        background_process::InitialPidFile::Create(&pid_file_path),
+        || {
+            for broker_endpoint in &etcd_broker.broker_endpoints {
+                let request = broker_endpoint
+                    .join("health")
+                    .with_context(|| {
+                        format!(
+                            "Failed to append /health path to broker endopint {}",
+                            broker_endpoint
+                        )
+                    })
+                    .and_then(|url| {
+                        client.get(&url.to_string()).build().with_context(|| {
+                            format!("Failed to construct request to etcd endpoint {url}")
+                        })
+                    })?;
+                if client.execute(request).is_ok() {
+                    return Ok(true);
+                }
+            }
+
+            Ok(false)
+        },
+    )
+    .context("Failed to spawn etcd subprocess")?;
 
     Ok(())
 }
 
 pub fn stop_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
-    let etcd_path = &env.etcd_broker.etcd_binary_path;
-    println!("Stopping etcd broker at {}", etcd_path.display());
-
-    let etcd_pid_file_path = etcd_pid_file_path(env);
-    let pid = Pid::from_raw(read_pidfile(&etcd_pid_file_path).with_context(|| {
-        format!(
-            "Failed to read etcd pid file at {}",
-            etcd_pid_file_path.display()
-        )
-    })?);
-
-    kill(pid, Signal::SIGTERM).with_context(|| {
-        format!(
-            "Failed to stop etcd with pid {pid} at {}",
-            etcd_pid_file_path.display()
-        )
-    })?;
-
-    Ok(())
+    background_process::stop_process(true, "etcd", &etcd_pid_file_path(env))
 }
 
 fn etcd_pid_file_path(env: &local_env::LocalEnv) -> PathBuf {
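For reference, a minimal standalone sketch of the kind of readiness probe the closure above performs against an etcd /health endpoint. This is not the project's code; the endpoint is whatever URL the broker is configured with.

use anyhow::Context;

// Hypothetical probe: returns true once the (assumed) health endpoint answers.
fn etcd_is_healthy(client: &reqwest::blocking::Client, endpoint: &url::Url) -> anyhow::Result<bool> {
    let health_url = endpoint.join("health").context("bad broker endpoint")?;
    // Any successful HTTP exchange is treated as "up"; errors mean "keep waiting".
    Ok(client.get(health_url.as_str()).send().is_ok())
}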
@@ -6,55 +6,12 @@
 // Intended to be used in integration tests and in CLI tools for
 // local installations.
 //
-use anyhow::{anyhow, bail, Context, Result};
-use std::fs;
-use std::path::Path;
-use std::process::Command;
 
+mod background_process;
 pub mod compute;
+pub mod connection;
 pub mod etcd;
 pub mod local_env;
+pub mod pageserver;
 pub mod postgresql_conf;
 pub mod safekeeper;
-pub mod storage;
-
-/// Read a PID file
-///
-/// We expect a file that contains a single integer.
-/// We return an i32 for compatibility with libc and nix.
-pub fn read_pidfile(pidfile: &Path) -> Result<i32> {
-    let pid_str = fs::read_to_string(pidfile)
-        .with_context(|| format!("failed to read pidfile {:?}", pidfile))?;
-    let pid: i32 = pid_str
-        .parse()
-        .map_err(|_| anyhow!("failed to parse pidfile {:?}", pidfile))?;
-    if pid < 1 {
-        bail!("pidfile {:?} contained bad value '{}'", pidfile, pid);
-    }
-    Ok(pid)
-}
-
-fn fill_rust_env_vars(cmd: &mut Command) -> &mut Command {
-    let cmd = cmd.env_clear().env("RUST_BACKTRACE", "1");
-
-    let var = "LLVM_PROFILE_FILE";
-    if let Some(val) = std::env::var_os(var) {
-        cmd.env(var, val);
-    }
-
-    const RUST_LOG_KEY: &str = "RUST_LOG";
-    if let Ok(rust_log_value) = std::env::var(RUST_LOG_KEY) {
-        cmd.env(RUST_LOG_KEY, rust_log_value)
-    } else {
-        cmd
-    }
-}
-
-fn fill_aws_secrets_vars(mut cmd: &mut Command) -> &mut Command {
-    for env_key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] {
-        if let Ok(value) = std::env::var(env_key) {
-            cmd = cmd.env(env_key, value);
-        }
-    }
-    cmd
-}
@@ -14,17 +14,19 @@ use std::path::{Path, PathBuf};
 use std::process::{Command, Stdio};
 use utils::{
     auth::{encode_from_key_file, Claims, Scope},
+    id::{NodeId, TenantId, TenantTimelineId, TimelineId},
     postgres_backend::AuthType,
-    zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
 };
 
 use crate::safekeeper::SafekeeperNode;
 
+pub const DEFAULT_PG_VERSION: u32 = 14;
+
 //
 // This data structures represents neon_local CLI config
 //
 // It is deserialized from the .neon/config file, or the config file passed
-// to 'zenith init --config=<path>' option. See control_plane/simple.conf for
+// to 'neon_local init --config=<path>' option. See control_plane/simple.conf for
 // an example.
 //
 #[serde_as]
@@ -48,13 +50,13 @@ pub struct LocalEnv {
 
     // Path to pageserver binary.
     #[serde(default)]
-    pub zenith_distrib_dir: PathBuf,
+    pub neon_distrib_dir: PathBuf,
 
-    // Default tenant ID to use with the 'zenith' command line utility, when
-    // --tenantid is not explicitly specified.
+    // Default tenant ID to use with the 'neon_local' command line utility, when
+    // --tenant_id is not explicitly specified.
     #[serde(default)]
     #[serde_as(as = "Option<DisplayFromStr>")]
-    pub default_tenant_id: Option<ZTenantId>,
+    pub default_tenant_id: Option<TenantId>,
 
     // used to issue tokens during e.g pg start
     #[serde(default)]
@@ -69,11 +71,11 @@ pub struct LocalEnv {
 
     /// Keep human-readable aliases in memory (and persist them to config), to hide ZId hex strings from the user.
     #[serde(default)]
-    // A `HashMap<String, HashMap<ZTenantId, ZTimelineId>>` would be more appropriate here,
+    // A `HashMap<String, HashMap<TenantId, TimelineId>>` would be more appropriate here,
     // but deserialization into a generic toml object as `toml::Value::try_from` fails with an error.
     // https://toml.io/en/v1.0.0 does not contain a concept of "a table inside another table".
     #[serde_as(as = "HashMap<_, Vec<(DisplayFromStr, DisplayFromStr)>>")]
-    branch_name_mappings: HashMap<String, Vec<(ZTenantId, ZTimelineId)>>,
+    branch_name_mappings: HashMap<String, Vec<(TenantId, TimelineId)>>,
 }
 
 /// Etcd broker config for cluster internal communication.
@@ -195,29 +197,50 @@ impl Default for SafekeeperConf {
 }
 
 impl LocalEnv {
-    // postgres installation paths
-    pub fn pg_bin_dir(&self) -> PathBuf {
-        self.pg_distrib_dir.join("bin")
-    }
-    pub fn pg_lib_dir(&self) -> PathBuf {
-        self.pg_distrib_dir.join("lib")
+    pub fn pg_distrib_dir_raw(&self) -> PathBuf {
+        self.pg_distrib_dir.clone()
     }
 
-    pub fn pageserver_bin(&self) -> anyhow::Result<PathBuf> {
-        Ok(self.zenith_distrib_dir.join("pageserver"))
+    pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
+        let path = self.pg_distrib_dir.clone();
+
+        match pg_version {
+            14 => Ok(path.join(format!("v{pg_version}"))),
+            15 => Ok(path.join(format!("v{pg_version}"))),
+            _ => bail!("Unsupported postgres version: {}", pg_version),
+        }
     }
 
-    pub fn safekeeper_bin(&self) -> anyhow::Result<PathBuf> {
-        Ok(self.zenith_distrib_dir.join("safekeeper"))
+    pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
+        match pg_version {
+            14 => Ok(self.pg_distrib_dir(pg_version)?.join("bin")),
+            15 => Ok(self.pg_distrib_dir(pg_version)?.join("bin")),
+            _ => bail!("Unsupported postgres version: {}", pg_version),
+        }
+    }
+    pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
+        match pg_version {
+            14 => Ok(self.pg_distrib_dir(pg_version)?.join("lib")),
+            15 => Ok(self.pg_distrib_dir(pg_version)?.join("lib")),
+            _ => bail!("Unsupported postgres version: {}", pg_version),
+        }
+    }
+
+    pub fn pageserver_bin(&self) -> PathBuf {
+        self.neon_distrib_dir.join("pageserver")
+    }
+
+    pub fn safekeeper_bin(&self) -> PathBuf {
+        self.neon_distrib_dir.join("safekeeper")
     }
 
     pub fn pg_data_dirs_path(&self) -> PathBuf {
         self.base_data_dir.join("pgdatadirs").join("tenants")
     }
 
-    pub fn pg_data_dir(&self, tenantid: &ZTenantId, branch_name: &str) -> PathBuf {
+    pub fn pg_data_dir(&self, tenant_id: &TenantId, branch_name: &str) -> PathBuf {
         self.pg_data_dirs_path()
-            .join(tenantid.to_string())
+            .join(tenant_id.to_string())
             .join(branch_name)
     }
 
@@ -233,8 +256,8 @@ impl LocalEnv {
     pub fn register_branch_mapping(
         &mut self,
         branch_name: String,
-        tenant_id: ZTenantId,
-        timeline_id: ZTimelineId,
+        tenant_id: TenantId,
+        timeline_id: TimelineId,
     ) -> anyhow::Result<()> {
         let existing_values = self
             .branch_name_mappings
@@ -260,22 +283,22 @@ impl LocalEnv {
     pub fn get_branch_timeline_id(
         &self,
         branch_name: &str,
-        tenant_id: ZTenantId,
-    ) -> Option<ZTimelineId> {
+        tenant_id: TenantId,
+    ) -> Option<TimelineId> {
         self.branch_name_mappings
             .get(branch_name)?
             .iter()
             .find(|(mapped_tenant_id, _)| mapped_tenant_id == &tenant_id)
             .map(|&(_, timeline_id)| timeline_id)
-            .map(ZTimelineId::from)
+            .map(TimelineId::from)
     }
 
-    pub fn timeline_name_mappings(&self) -> HashMap<ZTenantTimelineId, String> {
+    pub fn timeline_name_mappings(&self) -> HashMap<TenantTimelineId, String> {
         self.branch_name_mappings
             .iter()
             .flat_map(|(name, tenant_timelines)| {
                 tenant_timelines.iter().map(|&(tenant_id, timeline_id)| {
-                    (ZTenantTimelineId::new(tenant_id, timeline_id), name.clone())
+                    (TenantTimelineId::new(tenant_id, timeline_id), name.clone())
                 })
             })
             .collect()
@@ -289,24 +312,26 @@ impl LocalEnv {
         let mut env: LocalEnv = toml::from_str(toml)?;
 
         // Find postgres binaries.
-        // Follow POSTGRES_DISTRIB_DIR if set, otherwise look in "tmp_install".
+        // Follow POSTGRES_DISTRIB_DIR if set, otherwise look in "pg_install".
+        // Note that later in the code we assume, that distrib dirs follow the same pattern
+        // for all postgres versions.
         if env.pg_distrib_dir == Path::new("") {
            if let Some(postgres_bin) = env::var_os("POSTGRES_DISTRIB_DIR") {
                env.pg_distrib_dir = postgres_bin.into();
            } else {
                let cwd = env::current_dir()?;
-                env.pg_distrib_dir = cwd.join("tmp_install")
+                env.pg_distrib_dir = cwd.join("pg_install")
            }
        }
 
-        // Find zenith binaries.
-        if env.zenith_distrib_dir == Path::new("") {
-            env.zenith_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
+        // Find neon binaries.
+        if env.neon_distrib_dir == Path::new("") {
+            env.neon_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
        }
 
        // If no initial tenant ID was given, generate it.
        if env.default_tenant_id.is_none() {
-            env.default_tenant_id = Some(ZTenantId::generate());
+            env.default_tenant_id = Some(TenantId::generate());
        }
 
        env.base_data_dir = base_path();
@@ -320,12 +345,12 @@ impl LocalEnv {
 
         if !repopath.exists() {
             bail!(
-                "Zenith config is not found in {}. You need to run 'zenith init' first",
+                "Neon config is not found in {}. You need to run 'neon_local init' first",
                 repopath.to_str().unwrap()
             );
         }
 
-        // TODO: check that it looks like a zenith repository
+        // TODO: check that it looks like a neon repository
 
         // load and parse file
         let config = fs::read_to_string(repopath.join("config"))?;
@@ -337,12 +362,12 @@ impl LocalEnv {
     }
 
     pub fn persist_config(&self, base_path: &Path) -> anyhow::Result<()> {
-        // Currently, the user first passes a config file with 'zenith init --config=<path>'
+        // Currently, the user first passes a config file with 'neon_local init --config=<path>'
         // We read that in, in `create_config`, and fill any missing defaults. Then it's saved
         // to .neon/config. TODO: We lose any formatting and comments along the way, which is
         // a bit sad.
         let mut conf_content = r#"# This file describes a locale deployment of the page server
-# and safekeeeper node. It is read by the 'zenith' command-line
+# and safekeeeper node. It is read by the 'neon_local' command-line
 # utility.
 "#
         .to_string();
@@ -382,9 +407,9 @@ impl LocalEnv {
     }
 
     //
-    // Initialize a new Zenith repository
+    // Initialize a new Neon repository
     //
-    pub fn init(&mut self) -> anyhow::Result<()> {
+    pub fn init(&mut self, pg_version: u32) -> anyhow::Result<()> {
         // check if config already exists
         let base_path = &self.base_data_dir;
         ensure!(
@@ -397,17 +422,17 @@ impl LocalEnv {
             "directory '{}' already exists. Perhaps already initialized?",
             base_path.display()
         );
-        if !self.pg_distrib_dir.join("bin/postgres").exists() {
+        if !self.pg_bin_dir(pg_version)?.join("postgres").exists() {
             bail!(
                 "Can't find postgres binary at {}",
-                self.pg_distrib_dir.display()
+                self.pg_bin_dir(pg_version)?.display()
             );
         }
         for binary in ["pageserver", "safekeeper"] {
-            if !self.zenith_distrib_dir.join(binary).exists() {
+            if !self.neon_distrib_dir.join(binary).exists() {
                 bail!(
-                    "Can't find binary '{binary}' in zenith distrib dir '{}'",
-                    self.zenith_distrib_dir.display()
+                    "Can't find binary '{binary}' in neon distrib dir '{}'",
+                    self.neon_distrib_dir.display()
                 );
             }
         }
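A short illustrative sketch of how the versioned helpers above resolve paths, assuming the LocalEnv type and DEFAULT_PG_VERSION constant from the diff are in scope; the example paths in the comments are hypothetical.

fn show_install_layout(env: &LocalEnv) -> anyhow::Result<()> {
    // With pg_distrib_dir pointing at e.g. <repo>/pg_install, these resolve to
    // version-suffixed subdirectories such as <repo>/pg_install/v14/bin and .../v14/lib.
    let bin = env.pg_bin_dir(DEFAULT_PG_VERSION)?;
    let lib = env.pg_lib_dir(DEFAULT_PG_VERSION)?;
    println!("postgres binary = {}", bin.join("postgres").display());
    println!("library dir     = {}", lib.display());
    Ok(())
}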
control_plane/src/pageserver.rs (new file, 539 lines)
@@ -0,0 +1,539 @@
use std::collections::HashMap;
use std::fs::{self, File};
use std::io::{BufReader, Write};
use std::num::NonZeroU64;
use std::path::{Path, PathBuf};
use std::process::Child;
use std::{io, result};

use crate::connection::PgConnectionConfig;
use anyhow::{bail, Context};
use pageserver_api::models::{
    TenantConfigRequest, TenantCreateRequest, TenantInfo, TimelineCreateRequest, TimelineInfo,
};
use reqwest::blocking::{Client, RequestBuilder, Response};
use reqwest::{IntoUrl, Method};
use thiserror::Error;
use utils::{
    http::error::HttpErrorBody,
    id::{TenantId, TimelineId},
    lsn::Lsn,
    postgres_backend::AuthType,
};

use crate::{background_process, local_env::LocalEnv};

#[derive(Error, Debug)]
pub enum PageserverHttpError {
    #[error("Reqwest error: {0}")]
    Transport(#[from] reqwest::Error),

    #[error("Error: {0}")]
    Response(String),
}

impl From<anyhow::Error> for PageserverHttpError {
    fn from(e: anyhow::Error) -> Self {
        Self::Response(e.to_string())
    }
}

type Result<T> = result::Result<T, PageserverHttpError>;

pub trait ResponseErrorMessageExt: Sized {
    fn error_from_body(self) -> Result<Self>;
}

impl ResponseErrorMessageExt for Response {
    fn error_from_body(self) -> Result<Self> {
        let status = self.status();
        if !(status.is_client_error() || status.is_server_error()) {
            return Ok(self);
        }

        // reqwest does not export its error construction utility functions, so let's craft the message ourselves
        let url = self.url().to_owned();
        Err(PageserverHttpError::Response(
            match self.json::<HttpErrorBody>() {
                Ok(err_body) => format!("Error: {}", err_body.msg),
                Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
            },
        ))
    }
}

//
// Control routines for pageserver.
//
// Used in CLI and tests.
//
#[derive(Debug)]
pub struct PageServerNode {
    pub pg_connection_config: PgConnectionConfig,
    pub env: LocalEnv,
    pub http_client: Client,
    pub http_base_url: String,
}

impl PageServerNode {
    pub fn from_env(env: &LocalEnv) -> PageServerNode {
        let password = if env.pageserver.auth_type == AuthType::NeonJWT {
            &env.pageserver.auth_token
        } else {
            ""
        };

        Self {
            pg_connection_config: Self::pageserver_connection_config(
                password,
                &env.pageserver.listen_pg_addr,
            ),
            env: env.clone(),
            http_client: Client::new(),
            http_base_url: format!("http://{}/v1", env.pageserver.listen_http_addr),
        }
    }

    /// Construct libpq connection string for connecting to the pageserver.
    fn pageserver_connection_config(password: &str, listen_addr: &str) -> PgConnectionConfig {
        format!("postgresql://no_user:{password}@{listen_addr}/no_db")
            .parse()
            .unwrap()
    }

    pub fn initialize(
        &self,
        create_tenant: Option<TenantId>,
        initial_timeline_id: Option<TimelineId>,
        config_overrides: &[&str],
        pg_version: u32,
    ) -> anyhow::Result<TimelineId> {
        let id = format!("id={}", self.env.pageserver.id);
        // FIXME: the paths should be shell-escaped to handle paths with spaces, quotas etc.
        let pg_distrib_dir_param = format!(
            "pg_distrib_dir='{}'",
            self.env.pg_distrib_dir_raw().display()
        );

        let authg_type_param = format!("auth_type='{}'", self.env.pageserver.auth_type);
        let listen_http_addr_param = format!(
            "listen_http_addr='{}'",
            self.env.pageserver.listen_http_addr
        );
        let listen_pg_addr_param =
            format!("listen_pg_addr='{}'", self.env.pageserver.listen_pg_addr);
        let broker_endpoints_param = format!(
            "broker_endpoints=[{}]",
            self.env
                .etcd_broker
                .broker_endpoints
                .iter()
                .map(|url| format!("'{url}'"))
                .collect::<Vec<_>>()
                .join(",")
        );
        let broker_etcd_prefix_param = self
            .env
            .etcd_broker
            .broker_etcd_prefix
            .as_ref()
            .map(|prefix| format!("broker_etcd_prefix='{prefix}'"));

        let mut init_config_overrides = config_overrides.to_vec();
        init_config_overrides.push(&id);
        init_config_overrides.push(&pg_distrib_dir_param);
        init_config_overrides.push(&authg_type_param);
        init_config_overrides.push(&listen_http_addr_param);
        init_config_overrides.push(&listen_pg_addr_param);
        init_config_overrides.push(&broker_endpoints_param);

        if let Some(broker_etcd_prefix_param) = broker_etcd_prefix_param.as_deref() {
            init_config_overrides.push(broker_etcd_prefix_param);
        }

        if self.env.pageserver.auth_type != AuthType::Trust {
            init_config_overrides.push("auth_validation_public_key_path='auth_public_key.pem'");
        }

        let mut pageserver_process = self
            .start_node(&init_config_overrides, &self.env.base_data_dir, true)
            .with_context(|| {
                format!(
                    "Failed to start a process for pageserver {}",
                    self.env.pageserver.id,
                )
            })?;

        let init_result = self
            .try_init_timeline(create_tenant, initial_timeline_id, pg_version)
            .context("Failed to create initial tenant and timeline for pageserver");
        match &init_result {
            Ok(initial_timeline_id) => {
                println!("Successfully initialized timeline {initial_timeline_id}")
            }
            Err(e) => eprintln!("{e:#}"),
        }
        match pageserver_process.kill() {
            Err(e) => {
                eprintln!(
                    "Failed to stop pageserver {} process with pid {}: {e:#}",
                    self.env.pageserver.id,
                    pageserver_process.id(),
                )
            }
            Ok(()) => {
                println!(
                    "Stopped pageserver {} process with pid {}",
                    self.env.pageserver.id,
                    pageserver_process.id(),
                );
                // cleanup after pageserver startup, since we do not call regular `stop_process` during init
                let pid_file = self.pid_file();
                if let Err(e) = fs::remove_file(&pid_file) {
                    if e.kind() != io::ErrorKind::NotFound {
                        eprintln!("Failed to remove pid file {pid_file:?} after stopping the process: {e:#}");
                    }
                }
            }
        }
        init_result
    }

    fn try_init_timeline(
        &self,
        new_tenant_id: Option<TenantId>,
        new_timeline_id: Option<TimelineId>,
        pg_version: u32,
    ) -> anyhow::Result<TimelineId> {
        let initial_tenant_id = self.tenant_create(new_tenant_id, HashMap::new())?;
        let initial_timeline_info = self.timeline_create(
            initial_tenant_id,
            new_timeline_id,
            None,
            None,
            Some(pg_version),
        )?;
        Ok(initial_timeline_info.timeline_id)
    }

    pub fn repo_path(&self) -> PathBuf {
        self.env.pageserver_data_dir()
    }

    /// The pid file is created by the pageserver process, with its pid stored inside.
    /// Other pageservers cannot lock the same file and overwrite it for as long as the current
    /// pageserver runs. (Unless someone removes the file manually; never do that!)
    fn pid_file(&self) -> PathBuf {
        self.repo_path().join("pageserver.pid")
    }

    pub fn start(&self, config_overrides: &[&str]) -> anyhow::Result<Child> {
        self.start_node(config_overrides, &self.repo_path(), false)
    }

    fn start_node(
        &self,
        config_overrides: &[&str],
        datadir: &Path,
        update_config: bool,
    ) -> anyhow::Result<Child> {
        print!(
            "Starting pageserver at '{}' in '{}'",
            self.pg_connection_config.raw_address(),
            datadir.display()
        );
        io::stdout().flush()?;

        let mut args = vec![
            "-D",
            datadir.to_str().with_context(|| {
                format!("Datadir path {datadir:?} cannot be represented as a unicode string")
            })?,
        ];

        if update_config {
            args.push("--update-config");
        }

        for config_override in config_overrides {
            args.extend(["-c", config_override]);
        }

        background_process::start_process(
            "pageserver",
            datadir,
            &self.env.pageserver_bin(),
            &args,
            background_process::InitialPidFile::Expect(&self.pid_file()),
            || match self.check_status() {
                Ok(()) => Ok(true),
                Err(PageserverHttpError::Transport(_)) => Ok(false),
                Err(e) => Err(anyhow::anyhow!("Failed to check node status: {e}")),
            },
        )
    }

    ///
    /// Stop the server.
    ///
    /// If 'immediate' is true, we use SIGQUIT, killing the process immediately.
    /// Otherwise we use SIGTERM, triggering a clean shutdown
    ///
    /// If the server is not running, returns success
    ///
    pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
        background_process::stop_process(immediate, "pageserver", &self.pid_file())
    }

    pub fn page_server_psql(&self, sql: &str) -> Vec<postgres::SimpleQueryMessage> {
        let mut client = self.pg_connection_config.connect_no_tls().unwrap();

        println!("Pageserver query: '{sql}'");
        client.simple_query(sql).unwrap()
    }

    pub fn page_server_psql_client(&self) -> result::Result<postgres::Client, postgres::Error> {
        self.pg_connection_config.connect_no_tls()
    }

    fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
        let mut builder = self.http_client.request(method, url);
        if self.env.pageserver.auth_type == AuthType::NeonJWT {
            builder = builder.bearer_auth(&self.env.pageserver.auth_token)
        }
        builder
    }

    pub fn check_status(&self) -> Result<()> {
        self.http_request(Method::GET, format!("{}/status", self.http_base_url))
            .send()?
            .error_from_body()?;
        Ok(())
    }

    pub fn tenant_list(&self) -> Result<Vec<TenantInfo>> {
        Ok(self
            .http_request(Method::GET, format!("{}/tenant", self.http_base_url))
            .send()?
            .error_from_body()?
            .json()?)
    }

    pub fn tenant_create(
        &self,
        new_tenant_id: Option<TenantId>,
        settings: HashMap<&str, &str>,
    ) -> anyhow::Result<TenantId> {
        let mut settings = settings.clone();
        let request = TenantCreateRequest {
            new_tenant_id,
            checkpoint_distance: settings
                .remove("checkpoint_distance")
                .map(|x| x.parse::<u64>())
                .transpose()?,
            checkpoint_timeout: settings.remove("checkpoint_timeout").map(|x| x.to_string()),
            compaction_target_size: settings
                .remove("compaction_target_size")
                .map(|x| x.parse::<u64>())
                .transpose()?,
            compaction_period: settings.remove("compaction_period").map(|x| x.to_string()),
            compaction_threshold: settings
                .remove("compaction_threshold")
                .map(|x| x.parse::<usize>())
                .transpose()?,
            gc_horizon: settings
                .remove("gc_horizon")
                .map(|x| x.parse::<u64>())
                .transpose()?,
            gc_period: settings.remove("gc_period").map(|x| x.to_string()),
            image_creation_threshold: settings
                .remove("image_creation_threshold")
                .map(|x| x.parse::<usize>())
                .transpose()?,
            pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
            walreceiver_connect_timeout: settings
                .remove("walreceiver_connect_timeout")
                .map(|x| x.to_string()),
            lagging_wal_timeout: settings
                .remove("lagging_wal_timeout")
                .map(|x| x.to_string()),
            max_lsn_wal_lag: settings
                .remove("max_lsn_wal_lag")
                .map(|x| x.parse::<NonZeroU64>())
                .transpose()
                .context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
            trace_read_requests: settings
                .remove("trace_read_requests")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'trace_read_requests' as bool")?,
        };
        if !settings.is_empty() {
            bail!("Unrecognized tenant settings: {settings:?}")
        }
        self.http_request(Method::POST, format!("{}/tenant", self.http_base_url))
            .json(&request)
            .send()?
            .error_from_body()?
            .json::<Option<String>>()
            .with_context(|| {
                format!("Failed to parse tenant creation response for tenant id: {new_tenant_id:?}")
            })?
            .context("No tenant id was found in the tenant creation response")
            .and_then(|tenant_id_string| {
                tenant_id_string.parse().with_context(|| {
                    format!("Failed to parse response string as tenant id: '{tenant_id_string}'")
                })
            })
    }

    pub fn tenant_config(&self, tenant_id: TenantId, settings: HashMap<&str, &str>) -> Result<()> {
        self.http_request(Method::PUT, format!("{}/tenant/config", self.http_base_url))
            .json(&TenantConfigRequest {
                tenant_id,
                checkpoint_distance: settings
                    .get("checkpoint_distance")
                    .map(|x| x.parse::<u64>())
                    .transpose()
                    .context("Failed to parse 'checkpoint_distance' as an integer")?,
                checkpoint_timeout: settings.get("checkpoint_timeout").map(|x| x.to_string()),
                compaction_target_size: settings
                    .get("compaction_target_size")
                    .map(|x| x.parse::<u64>())
                    .transpose()
                    .context("Failed to parse 'compaction_target_size' as an integer")?,
                compaction_period: settings.get("compaction_period").map(|x| x.to_string()),
                compaction_threshold: settings
                    .get("compaction_threshold")
                    .map(|x| x.parse::<usize>())
                    .transpose()
                    .context("Failed to parse 'compaction_threshold' as an integer")?,
                gc_horizon: settings
                    .get("gc_horizon")
                    .map(|x| x.parse::<u64>())
                    .transpose()
                    .context("Failed to parse 'gc_horizon' as an integer")?,
                gc_period: settings.get("gc_period").map(|x| x.to_string()),
                image_creation_threshold: settings
                    .get("image_creation_threshold")
                    .map(|x| x.parse::<usize>())
                    .transpose()
                    .context("Failed to parse 'image_creation_threshold' as non zero integer")?,
                pitr_interval: settings.get("pitr_interval").map(|x| x.to_string()),
                walreceiver_connect_timeout: settings
                    .get("walreceiver_connect_timeout")
                    .map(|x| x.to_string()),
                lagging_wal_timeout: settings.get("lagging_wal_timeout").map(|x| x.to_string()),
                max_lsn_wal_lag: settings
                    .get("max_lsn_wal_lag")
                    .map(|x| x.parse::<NonZeroU64>())
                    .transpose()
                    .context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
                trace_read_requests: settings
                    .get("trace_read_requests")
                    .map(|x| x.parse::<bool>())
                    .transpose()
                    .context("Failed to parse 'trace_read_requests' as bool")?,
            })
            .send()?
            .error_from_body()?;

        Ok(())
    }

    pub fn timeline_list(&self, tenant_id: &TenantId) -> anyhow::Result<Vec<TimelineInfo>> {
        let timeline_infos: Vec<TimelineInfo> = self
            .http_request(
                Method::GET,
                format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
            )
            .send()?
            .error_from_body()?
            .json()?;

        Ok(timeline_infos)
    }

    pub fn timeline_create(
        &self,
        tenant_id: TenantId,
        new_timeline_id: Option<TimelineId>,
        ancestor_start_lsn: Option<Lsn>,
        ancestor_timeline_id: Option<TimelineId>,
        pg_version: Option<u32>,
    ) -> anyhow::Result<TimelineInfo> {
        self.http_request(
            Method::POST,
            format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
        )
        .json(&TimelineCreateRequest {
            new_timeline_id,
            ancestor_start_lsn,
            ancestor_timeline_id,
            pg_version,
        })
        .send()?
        .error_from_body()?
        .json::<Option<TimelineInfo>>()
        .with_context(|| {
            format!("Failed to parse timeline creation response for tenant id: {tenant_id}")
        })?
        .with_context(|| {
            format!(
                "No timeline id was found in the timeline creation response for tenant {tenant_id}"
            )
        })
    }

    /// Import a basebackup prepared using either:
    /// a) `pg_basebackup -F tar`, or
    /// b) The `fullbackup` pageserver endpoint
    ///
    /// # Arguments
    /// * `tenant_id` - tenant to import into. Created if not exists
    /// * `timeline_id` - id to assign to imported timeline
    /// * `base` - (start lsn of basebackup, path to `base.tar` file)
    /// * `pg_wal` - if there's any wal to import: (end lsn, path to `pg_wal.tar`)
    pub fn timeline_import(
        &self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        base: (Lsn, PathBuf),
        pg_wal: Option<(Lsn, PathBuf)>,
        pg_version: u32,
    ) -> anyhow::Result<()> {
        let mut client = self.pg_connection_config.connect_no_tls().unwrap();

        // Init base reader
        let (start_lsn, base_tarfile_path) = base;
        let base_tarfile = File::open(base_tarfile_path)?;
        let mut base_reader = BufReader::new(base_tarfile);

        // Init wal reader if necessary
        let (end_lsn, wal_reader) = if let Some((end_lsn, wal_tarfile_path)) = pg_wal {
            let wal_tarfile = File::open(wal_tarfile_path)?;
            let wal_reader = BufReader::new(wal_tarfile);
            (end_lsn, Some(wal_reader))
        } else {
            (start_lsn, None)
        };

        // Import base
        let import_cmd = format!(
            "import basebackup {tenant_id} {timeline_id} {start_lsn} {end_lsn} {pg_version}"
        );
        let mut writer = client.copy_in(&import_cmd)?;
        io::copy(&mut base_reader, &mut writer)?;
        writer.finish()?;

        // Import wal if necessary
        if let Some(mut wal_reader) = wal_reader {
            let import_cmd = format!("import wal {tenant_id} {timeline_id} {start_lsn} {end_lsn}");
            let mut writer = client.copy_in(&import_cmd)?;
            io::copy(&mut wal_reader, &mut writer)?;
            writer.finish()?;
        }

        Ok(())
    }
}
@@ -2,10 +2,10 @@
 /// Module for parsing postgresql.conf file.
 ///
 /// NOTE: This doesn't implement the full, correct postgresql.conf syntax. Just
-/// enough to extract a few settings we need in Zenith, assuming you don't do
+/// enough to extract a few settings we need in Neon, assuming you don't do
 /// funny stuff like include-directives or funny escaping.
 use anyhow::{bail, Context, Result};
-use lazy_static::lazy_static;
+use once_cell::sync::Lazy;
 use regex::Regex;
 use std::collections::HashMap;
 use std::fmt;
@@ -19,9 +19,7 @@ pub struct PostgresConf {
     hash: HashMap<String, String>,
 }
 
-lazy_static! {
-    static ref CONF_LINE_RE: Regex = Regex::new(r"^((?:\w|\.)+)\s*=\s*(\S+)$").unwrap();
-}
+static CONF_LINE_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"^((?:\w|\.)+)\s*=\s*(\S+)$").unwrap());
 
 impl PostgresConf {
     pub fn new() -> PostgresConf {
@@ -139,10 +137,10 @@ fn escape_str(s: &str) -> String {
     //
     // This regex is a bit more conservative than the rules in guc-file.l, so we quote some
     // strings that PostgreSQL would accept without quoting, but that's OK.
-    lazy_static! {
-        static ref UNQUOTED_RE: Regex =
-            Regex::new(r"(^[-+]?[0-9]+[a-zA-Z]*$)|(^[a-zA-Z][a-zA-Z0-9]*$)").unwrap();
-    }
+    static UNQUOTED_RE: Lazy<Regex> =
+        Lazy::new(|| Regex::new(r"(^[-+]?[0-9]+[a-zA-Z]*$)|(^[a-zA-Z][a-zA-Z0-9]*$)").unwrap());
     if UNQUOTED_RE.is_match(s) {
         s.to_string()
     } else {
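The hunks above swap lazy_static for once_cell. As a standalone reminder of the pattern (this sketch is mine, not from the diff), a lazily-initialized regex with once_cell looks like this:

use once_cell::sync::Lazy;
use regex::Regex;

// Compiled once on first use, then reused; same effect as the old lazy_static! block.
static KEY_VALUE_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"^(\w+)\s*=\s*(\S+)$").unwrap());

fn is_key_value(line: &str) -> bool {
    KEY_VALUE_RE.is_match(line)
}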
@@ -1,29 +1,21 @@
|
|||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::net::TcpStream;
|
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::process::Command;
|
use std::process::Child;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::{io, result};
|
||||||
use std::{io, result, thread};
|
|
||||||
|
|
||||||
use anyhow::bail;
|
use anyhow::Context;
|
||||||
use nix::errno::Errno;
|
|
||||||
use nix::sys::signal::{kill, Signal};
|
|
||||||
use nix::unistd::Pid;
|
|
||||||
use postgres::Config;
|
|
||||||
use reqwest::blocking::{Client, RequestBuilder, Response};
|
use reqwest::blocking::{Client, RequestBuilder, Response};
|
||||||
use reqwest::{IntoUrl, Method};
|
use reqwest::{IntoUrl, Method};
|
||||||
use safekeeper::http::models::TimelineCreateRequest;
|
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
use utils::{
|
use utils::{http::error::HttpErrorBody, id::NodeId};
|
||||||
connstring::connection_address,
|
|
||||||
http::error::HttpErrorBody,
|
|
||||||
zid::{NodeId, ZTenantId, ZTimelineId},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::local_env::{LocalEnv, SafekeeperConf};
|
use crate::connection::PgConnectionConfig;
|
||||||
use crate::storage::PageServerNode;
|
use crate::pageserver::PageServerNode;
|
||||||
use crate::{fill_aws_secrets_vars, fill_rust_env_vars, read_pidfile};
|
use crate::{
|
||||||
|
background_process,
|
||||||
|
local_env::{LocalEnv, SafekeeperConf},
|
||||||
|
};
|
||||||
|
|
||||||
#[derive(Error, Debug)]
|
#[derive(Error, Debug)]
|
||||||
pub enum SafekeeperHttpError {
|
pub enum SafekeeperHttpError {
|
||||||
@@ -47,12 +39,12 @@ impl ResponseErrorMessageExt for Response {
|
|||||||
return Ok(self);
|
return Ok(self);
|
||||||
}
|
}
|
||||||
|
|
||||||
// reqwest do not export it's error construction utility functions, so lets craft the message ourselves
|
// reqwest does not export its error construction utility functions, so let's craft the message ourselves
|
||||||
let url = self.url().to_owned();
|
let url = self.url().to_owned();
|
||||||
Err(SafekeeperHttpError::Response(
|
Err(SafekeeperHttpError::Response(
|
||||||
match self.json::<HttpErrorBody>() {
|
match self.json::<HttpErrorBody>() {
|
||||||
Ok(err_body) => format!("Error: {}", err_body.msg),
|
Ok(err_body) => format!("Error: {}", err_body.msg),
|
||||||
Err(_) => format!("Http error ({}) at {url}.", status.as_u16()),
|
Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
|
||||||
},
|
},
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
@@ -69,7 +61,7 @@ pub struct SafekeeperNode {
|
|||||||
|
|
||||||
pub conf: SafekeeperConf,
|
pub conf: SafekeeperConf,
|
||||||
|
|
||||||
pub pg_connection_config: Config,
|
pub pg_connection_config: PgConnectionConfig,
|
||||||
pub env: LocalEnv,
|
pub env: LocalEnv,
|
||||||
pub http_client: Client,
|
pub http_client: Client,
|
||||||
pub http_base_url: String,
|
pub http_base_url: String,
|
||||||
@@ -93,15 +85,15 @@ impl SafekeeperNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Construct libpq connection string for connecting to this safekeeper.
|
/// Construct libpq connection string for connecting to this safekeeper.
|
||||||
fn safekeeper_connection_config(port: u16) -> Config {
|
fn safekeeper_connection_config(port: u16) -> PgConnectionConfig {
|
||||||
// TODO safekeeper authentication not implemented yet
|
// TODO safekeeper authentication not implemented yet
|
||||||
format!("postgresql://no_user@127.0.0.1:{}/no_db", port)
|
format!("postgresql://no_user@127.0.0.1:{port}/no_db")
|
||||||
.parse()
|
.parse()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
|
pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
|
||||||
env.safekeeper_data_dir(format!("sk{}", sk_id).as_ref())
|
env.safekeeper_data_dir(&format!("sk{sk_id}"))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn datadir_path(&self) -> PathBuf {
|
pub fn datadir_path(&self) -> PathBuf {
|
||||||
@@ -112,92 +104,78 @@ impl SafekeeperNode {
|
|||||||
self.datadir_path().join("safekeeper.pid")
|
self.datadir_path().join("safekeeper.pid")
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn start(&self) -> anyhow::Result<()> {
|
pub fn start(&self) -> anyhow::Result<Child> {
|
||||||
print!(
|
print!(
|
||||||
"Starting safekeeper at '{}' in '{}'",
|
"Starting safekeeper at '{}' in '{}'",
|
||||||
connection_address(&self.pg_connection_config),
|
self.pg_connection_config.raw_address(),
|
||||||
self.datadir_path().display()
|
self.datadir_path().display()
|
||||||
);
|
);
|
||||||
io::stdout().flush().unwrap();
|
io::stdout().flush().unwrap();
|
||||||
|
|
||||||
let listen_pg = format!("127.0.0.1:{}", self.conf.pg_port);
|
let listen_pg = format!("127.0.0.1:{}", self.conf.pg_port);
|
||||||
let listen_http = format!("127.0.0.1:{}", self.conf.http_port);
|
let listen_http = format!("127.0.0.1:{}", self.conf.http_port);
|
||||||
|
let id = self.id;
|
||||||
|
let datadir = self.datadir_path();
|
||||||
|
|
||||||
let mut cmd = Command::new(self.env.safekeeper_bin()?);
|
let id_string = id.to_string();
|
||||||
fill_rust_env_vars(
|
let mut args = vec![
|
||||||
cmd.args(&["-D", self.datadir_path().to_str().unwrap()])
|
"-D",
|
||||||
.args(&["--id", self.id.to_string().as_ref()])
|
datadir.to_str().with_context(|| {
|
||||||
.args(&["--listen-pg", &listen_pg])
|
format!("Datadir path {datadir:?} cannot be represented as a unicode string")
|
||||||
.args(&["--listen-http", &listen_http])
|
})?,
|
||||||
.args(&["--recall", "1 second"])
|
"--id",
|
||||||
.arg("--daemonize"),
|
&id_string,
|
||||||
);
|
"--listen-pg",
|
||||||
|
&listen_pg,
|
||||||
|
"--listen-http",
|
||||||
|
&listen_http,
|
||||||
|
];
|
||||||
if !self.conf.sync {
|
if !self.conf.sync {
|
||||||
cmd.arg("--no-sync");
|
args.push("--no-sync");
|
||||||
}
|
}
|
||||||
|
|
||||||
let comma_separated_endpoints = self.env.etcd_broker.comma_separated_endpoints();
|
let comma_separated_endpoints = self.env.etcd_broker.comma_separated_endpoints();
|
||||||
if !comma_separated_endpoints.is_empty() {
|
if !comma_separated_endpoints.is_empty() {
|
||||||
cmd.args(&["--broker-endpoints", &comma_separated_endpoints]);
|
args.extend(["--broker-endpoints", &comma_separated_endpoints]);
|
||||||
}
|
}
|
||||||
if let Some(prefix) = self.env.etcd_broker.broker_etcd_prefix.as_deref() {
|
if let Some(prefix) = self.env.etcd_broker.broker_etcd_prefix.as_deref() {
|
||||||
cmd.args(&["--broker-etcd-prefix", prefix]);
|
args.extend(["--broker-etcd-prefix", prefix]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let mut backup_threads = String::new();
|
||||||
if let Some(threads) = self.conf.backup_threads {
|
if let Some(threads) = self.conf.backup_threads {
|
||||||
cmd.args(&["--backup-threads", threads.to_string().as_ref()]);
|
backup_threads = threads.to_string();
|
||||||
|
args.extend(["--backup-threads", &backup_threads]);
|
||||||
|
} else {
|
||||||
|
drop(backup_threads);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(ref remote_storage) = self.conf.remote_storage {
|
if let Some(ref remote_storage) = self.conf.remote_storage {
|
||||||
cmd.args(&["--remote-storage", remote_storage]);
|
args.extend(["--remote-storage", remote_storage]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
+        let key_path = self.env.base_data_dir.join("auth_public_key.pem");
         if self.conf.auth_enabled {
-            cmd.arg("--auth-validation-public-key-path");
-            cmd.arg(self.env.base_data_dir.join("auth_public_key.pem"));
+            args.extend([
+                // PathBuf is better be passed as is, not via `String`.
+                "--auth-validation-public-key-path",
+                key_path.to_str().with_context(|| {
+                    format!("Key path {key_path:?} cannot be represented as a unicode string")
+                })?,
+            ]);
         }

-        fill_aws_secrets_vars(&mut cmd);
-
-        if !cmd.status()?.success() {
-            bail!(
-                "Safekeeper failed to start. See '{}' for details.",
-                self.datadir_path().join("safekeeper.log").display()
-            );
-        }
-
-        // It takes a while for the safekeeper to start up. Wait until it is
-        // open for business.
-        const RETRIES: i8 = 15;
-        for retries in 1..RETRIES {
-            match self.check_status() {
-                Ok(_) => {
-                    println!("\nSafekeeper started");
-                    return Ok(());
-                }
-                Err(err) => {
-                    match err {
-                        SafekeeperHttpError::Transport(err) => {
-                            if err.is_connect() && retries < 5 {
-                                print!(".");
-                                io::stdout().flush().unwrap();
-                            } else {
-                                if retries == 5 {
-                                    println!() // put a line break after dots for second message
-                                }
-                                println!(
-                                    "Safekeeper not responding yet, err {} retrying ({})...",
-                                    err, retries
-                                );
-                            }
-                        }
-                        SafekeeperHttpError::Response(msg) => {
-                            bail!("safekeeper failed to start: {} ", msg)
-                        }
-                    }
-                    thread::sleep(Duration::from_secs(1));
-                }
-            }
-        }
-        bail!("safekeeper failed to start in {} seconds", RETRIES);
+        background_process::start_process(
+            &format!("safekeeper {id}"),
+            &datadir,
+            &self.env.safekeeper_bin(),
+            &args,
+            background_process::InitialPidFile::Expect(&self.pid_file()),
+            || match self.check_status() {
+                Ok(()) => Ok(true),
+                Err(SafekeeperHttpError::Transport(_)) => Ok(false),
+                Err(e) => Err(anyhow::anyhow!("Failed to check node status: {e}")),
+            },
+        )
     }

     ///
@@ -209,80 +187,16 @@ impl SafekeeperNode
     /// If the server is not running, returns success
     ///
     pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
-        let pid_file = self.pid_file();
-        if !pid_file.exists() {
-            println!("Safekeeper {} is already stopped", self.id);
-            return Ok(());
-        }
-        let pid = read_pidfile(&pid_file)?;
-        let pid = Pid::from_raw(pid);
-
-        let sig = if immediate {
-            print!("Stopping safekeeper {} immediately..", self.id);
-            Signal::SIGQUIT
-        } else {
-            print!("Stopping safekeeper {} gracefully..", self.id);
-            Signal::SIGTERM
-        };
-        io::stdout().flush().unwrap();
-        match kill(pid, sig) {
-            Ok(_) => (),
-            Err(Errno::ESRCH) => {
-                println!(
-                    "Safekeeper with pid {} does not exist, but a PID file was found",
-                    pid
-                );
-                return Ok(());
-            }
-            Err(err) => bail!(
-                "Failed to send signal to safekeeper with pid {}: {}",
-                pid,
-                err.desc()
-            ),
-        }
-
-        let address = connection_address(&self.pg_connection_config);
-
-        // TODO Remove this "timeout" and handle it on caller side instead.
-        // Shutting down may take a long time,
-        // if safekeeper flushes a lot of data
-        let mut tcp_stopped = false;
-        for _ in 0..100 {
-            if !tcp_stopped {
-                if let Err(err) = TcpStream::connect(&address) {
-                    tcp_stopped = true;
-                    if err.kind() != io::ErrorKind::ConnectionRefused {
-                        eprintln!("\nSafekeeper connection failed with error: {err}");
-                    }
-                }
-            }
-            if tcp_stopped {
-                // Also check status on the HTTP port
-                match self.check_status() {
-                    Err(SafekeeperHttpError::Transport(err)) if err.is_connect() => {
-                        println!("done!");
-                        return Ok(());
-                    }
-                    Err(err) => {
-                        eprintln!("\nSafekeeper status check failed with error: {err}");
-                        return Ok(());
-                    }
-                    Ok(()) => {
-                        // keep waiting
-                    }
-                }
-            }
-            print!(".");
-            io::stdout().flush().unwrap();
-            thread::sleep(Duration::from_secs(1));
-        }
-
-        bail!("Failed to stop safekeeper with pid {}", pid);
+        background_process::stop_process(
+            immediate,
+            &format!("safekeeper {}", self.id),
+            &self.pid_file(),
+        )
     }

     fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
         // TODO: authentication
-        //if self.env.auth_type == AuthType::ZenithJWT {
+        //if self.env.auth_type == AuthType::NeonJWT {
         //    builder = builder.bearer_auth(&self.env.safekeeper_auth_token)
         //}
         self.http_client.request(method, url)
@@ -294,25 +208,4 @@ impl SafekeeperNode
             .error_from_body()?;
         Ok(())
     }
-
-    pub fn timeline_create(
-        &self,
-        tenant_id: ZTenantId,
-        timeline_id: ZTimelineId,
-        peer_ids: Vec<NodeId>,
-    ) -> Result<()> {
-        Ok(self
-            .http_request(
-                Method::POST,
-                format!("{}/{}", self.http_base_url, "timeline"),
-            )
-            .json(&TimelineCreateRequest {
-                tenant_id,
-                timeline_id,
-                peer_ids,
-            })
-            .send()?
-            .error_from_body()?
-            .json()?)
-    }
 }
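For a quick manual check that mirrors what the `check_status()` closure above polls, the safekeeper's HTTP API can be probed from the shell. This is a minimal sketch, assuming the HTTP listener is reachable on localhost:7676 (the port used in the docker-compose setup further down) and that it serves a `/v1/status` endpoint; both are assumptions, not something this diff guarantees.

#!/bin/bash
# Poll the safekeeper HTTP API until it answers, roughly what the
# start_process() readiness closure does (port and path are assumptions).
for i in $(seq 1 15); do
    if curl -sf http://localhost:7676/v1/status > /dev/null; then
        echo "safekeeper is up"
        exit 0
    fi
    sleep 1
done
echo "safekeeper did not become ready within 15 seconds" >&2
exit 1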
@@ -1,581 +0,0 @@
|
|||||||
use std::collections::HashMap;
|
|
||||||
use std::fs::File;
|
|
||||||
use std::io::{BufReader, Write};
|
|
||||||
use std::net::TcpStream;
|
|
||||||
use std::num::NonZeroU64;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::process::Command;
|
|
||||||
use std::time::Duration;
|
|
||||||
use std::{io, result, thread};
|
|
||||||
|
|
||||||
use anyhow::{bail, Context};
|
|
||||||
use nix::errno::Errno;
|
|
||||||
use nix::sys::signal::{kill, Signal};
|
|
||||||
use nix::unistd::Pid;
|
|
||||||
use pageserver::http::models::{TenantConfigRequest, TenantCreateRequest, TimelineCreateRequest};
|
|
||||||
use pageserver::tenant_mgr::TenantInfo;
|
|
||||||
use pageserver::timelines::TimelineInfo;
|
|
||||||
use postgres::{Config, NoTls};
|
|
||||||
use reqwest::blocking::{Client, RequestBuilder, Response};
|
|
||||||
use reqwest::{IntoUrl, Method};
|
|
||||||
use thiserror::Error;
|
|
||||||
use utils::{
|
|
||||||
connstring::connection_address,
|
|
||||||
http::error::HttpErrorBody,
|
|
||||||
lsn::Lsn,
|
|
||||||
postgres_backend::AuthType,
|
|
||||||
zid::{ZTenantId, ZTimelineId},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::local_env::LocalEnv;
|
|
||||||
use crate::{fill_aws_secrets_vars, fill_rust_env_vars, read_pidfile};
|
|
||||||
|
|
||||||
#[derive(Error, Debug)]
|
|
||||||
pub enum PageserverHttpError {
|
|
||||||
#[error("Reqwest error: {0}")]
|
|
||||||
Transport(#[from] reqwest::Error),
|
|
||||||
|
|
||||||
#[error("Error: {0}")]
|
|
||||||
Response(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<anyhow::Error> for PageserverHttpError {
|
|
||||||
fn from(e: anyhow::Error) -> Self {
|
|
||||||
Self::Response(e.to_string())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Result<T> = result::Result<T, PageserverHttpError>;
|
|
||||||
|
|
||||||
pub trait ResponseErrorMessageExt: Sized {
|
|
||||||
fn error_from_body(self) -> Result<Self>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ResponseErrorMessageExt for Response {
|
|
||||||
fn error_from_body(self) -> Result<Self> {
|
|
||||||
let status = self.status();
|
|
||||||
if !(status.is_client_error() || status.is_server_error()) {
|
|
||||||
return Ok(self);
|
|
||||||
}
|
|
||||||
|
|
||||||
// reqwest does not export its error construction utility functions, so let's craft the message ourselves
|
|
||||||
let url = self.url().to_owned();
|
|
||||||
Err(PageserverHttpError::Response(
|
|
||||||
match self.json::<HttpErrorBody>() {
|
|
||||||
Ok(err_body) => format!("Error: {}", err_body.msg),
|
|
||||||
Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
|
|
||||||
},
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// Control routines for pageserver.
|
|
||||||
//
|
|
||||||
// Used in CLI and tests.
|
|
||||||
//
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct PageServerNode {
|
|
||||||
pub pg_connection_config: Config,
|
|
||||||
pub env: LocalEnv,
|
|
||||||
pub http_client: Client,
|
|
||||||
pub http_base_url: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PageServerNode {
|
|
||||||
pub fn from_env(env: &LocalEnv) -> PageServerNode {
|
|
||||||
let password = if env.pageserver.auth_type == AuthType::ZenithJWT {
|
|
||||||
&env.pageserver.auth_token
|
|
||||||
} else {
|
|
||||||
""
|
|
||||||
};
|
|
||||||
|
|
||||||
Self {
|
|
||||||
pg_connection_config: Self::pageserver_connection_config(
|
|
||||||
password,
|
|
||||||
&env.pageserver.listen_pg_addr,
|
|
||||||
),
|
|
||||||
env: env.clone(),
|
|
||||||
http_client: Client::new(),
|
|
||||||
http_base_url: format!("http://{}/v1", env.pageserver.listen_http_addr),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Construct libpq connection string for connecting to the pageserver.
|
|
||||||
fn pageserver_connection_config(password: &str, listen_addr: &str) -> Config {
|
|
||||||
format!("postgresql://no_user:{}@{}/no_db", password, listen_addr)
|
|
||||||
.parse()
|
|
||||||
.unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn init(
|
|
||||||
&self,
|
|
||||||
create_tenant: Option<ZTenantId>,
|
|
||||||
initial_timeline_id: Option<ZTimelineId>,
|
|
||||||
config_overrides: &[&str],
|
|
||||||
) -> anyhow::Result<ZTimelineId> {
|
|
||||||
let mut cmd = Command::new(self.env.pageserver_bin()?);
|
|
||||||
|
|
||||||
let id = format!("id={}", self.env.pageserver.id);
|
|
||||||
|
|
||||||
// FIXME: the paths should be shell-escaped to handle paths with spaces, quotes, etc.
|
|
||||||
let base_data_dir_param = self.env.base_data_dir.display().to_string();
|
|
||||||
let pg_distrib_dir_param =
|
|
||||||
format!("pg_distrib_dir='{}'", self.env.pg_distrib_dir.display());
|
|
||||||
let auth_type_param = format!("auth_type='{}'", self.env.pageserver.auth_type);
|
|
||||||
let listen_http_addr_param = format!(
|
|
||||||
"listen_http_addr='{}'",
|
|
||||||
self.env.pageserver.listen_http_addr
|
|
||||||
);
|
|
||||||
let listen_pg_addr_param =
|
|
||||||
format!("listen_pg_addr='{}'", self.env.pageserver.listen_pg_addr);
|
|
||||||
let broker_endpoints_param = format!(
|
|
||||||
"broker_endpoints=[{}]",
|
|
||||||
self.env
|
|
||||||
.etcd_broker
|
|
||||||
.broker_endpoints
|
|
||||||
.iter()
|
|
||||||
.map(|url| format!("'{url}'"))
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
.join(",")
|
|
||||||
);
|
|
||||||
let mut args = Vec::with_capacity(20);
|
|
||||||
|
|
||||||
args.push("--init");
|
|
||||||
args.extend(["-D", &base_data_dir_param]);
|
|
||||||
args.extend(["-c", &pg_distrib_dir_param]);
|
|
||||||
args.extend(["-c", &authg_type_param]);
|
|
||||||
args.extend(["-c", &listen_http_addr_param]);
|
|
||||||
args.extend(["-c", &listen_pg_addr_param]);
|
|
||||||
args.extend(["-c", &broker_endpoints_param]);
|
|
||||||
args.extend(["-c", &id]);
|
|
||||||
|
|
||||||
let broker_etcd_prefix_param = self
|
|
||||||
.env
|
|
||||||
.etcd_broker
|
|
||||||
.broker_etcd_prefix
|
|
||||||
.as_ref()
|
|
||||||
.map(|prefix| format!("broker_etcd_prefix='{prefix}'"));
|
|
||||||
if let Some(broker_etcd_prefix_param) = broker_etcd_prefix_param.as_deref() {
|
|
||||||
args.extend(["-c", broker_etcd_prefix_param]);
|
|
||||||
}
|
|
||||||
|
|
||||||
for config_override in config_overrides {
|
|
||||||
args.extend(["-c", config_override]);
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.env.pageserver.auth_type != AuthType::Trust {
|
|
||||||
args.extend([
|
|
||||||
"-c",
|
|
||||||
"auth_validation_public_key_path='auth_public_key.pem'",
|
|
||||||
]);
|
|
||||||
}
|
|
||||||
|
|
||||||
let create_tenant = create_tenant.map(|id| id.to_string());
|
|
||||||
if let Some(tenant_id) = create_tenant.as_deref() {
|
|
||||||
args.extend(["--create-tenant", tenant_id])
|
|
||||||
}
|
|
||||||
|
|
||||||
let initial_timeline_id = initial_timeline_id.unwrap_or_else(ZTimelineId::generate);
|
|
||||||
let initial_timeline_id_string = initial_timeline_id.to_string();
|
|
||||||
args.extend(["--initial-timeline-id", &initial_timeline_id_string]);
|
|
||||||
|
|
||||||
let cmd_with_args = cmd.args(args);
|
|
||||||
let init_output = fill_rust_env_vars(cmd_with_args)
|
|
||||||
.output()
|
|
||||||
.with_context(|| {
|
|
||||||
format!("failed to init pageserver with command {:?}", cmd_with_args)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if !init_output.status.success() {
|
|
||||||
bail!(
|
|
||||||
"init invocation failed, {}\nStdout: {}\nStderr: {}",
|
|
||||||
init_output.status,
|
|
||||||
String::from_utf8_lossy(&init_output.stdout),
|
|
||||||
String::from_utf8_lossy(&init_output.stderr)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// echo the captured output of the init command
|
|
||||||
println!("{}", String::from_utf8_lossy(&init_output.stdout));
|
|
||||||
|
|
||||||
Ok(initial_timeline_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn repo_path(&self) -> PathBuf {
|
|
||||||
self.env.pageserver_data_dir()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn pid_file(&self) -> PathBuf {
|
|
||||||
self.repo_path().join("pageserver.pid")
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn start(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
|
|
||||||
print!(
|
|
||||||
"Starting pageserver at '{}' in '{}'",
|
|
||||||
connection_address(&self.pg_connection_config),
|
|
||||||
self.repo_path().display()
|
|
||||||
);
|
|
||||||
io::stdout().flush().unwrap();
|
|
||||||
|
|
||||||
let repo_path = self.repo_path();
|
|
||||||
let mut args = vec!["-D", repo_path.to_str().unwrap()];
|
|
||||||
|
|
||||||
for config_override in config_overrides {
|
|
||||||
args.extend(["-c", config_override]);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut cmd = Command::new(self.env.pageserver_bin()?);
|
|
||||||
let mut filled_cmd = fill_rust_env_vars(cmd.args(&args).arg("--daemonize"));
|
|
||||||
filled_cmd = fill_aws_secrets_vars(filled_cmd);
|
|
||||||
|
|
||||||
if !filled_cmd.status()?.success() {
|
|
||||||
bail!(
|
|
||||||
"Pageserver failed to start. See '{}' for details.",
|
|
||||||
self.repo_path().join("pageserver.log").display()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// It takes a while for the page server to start up. Wait until it is
|
|
||||||
// open for business.
|
|
||||||
const RETRIES: i8 = 15;
|
|
||||||
for retries in 1..RETRIES {
|
|
||||||
match self.check_status() {
|
|
||||||
Ok(_) => {
|
|
||||||
println!("\nPageserver started");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
match err {
|
|
||||||
PageserverHttpError::Transport(err) => {
|
|
||||||
if err.is_connect() && retries < 5 {
|
|
||||||
print!(".");
|
|
||||||
io::stdout().flush().unwrap();
|
|
||||||
} else {
|
|
||||||
if retries == 5 {
|
|
||||||
println!() // put a line break after dots for second message
|
|
||||||
}
|
|
||||||
println!(
|
|
||||||
"Pageserver not responding yet, err {} retrying ({})...",
|
|
||||||
err, retries
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PageserverHttpError::Response(msg) => {
|
|
||||||
bail!("pageserver failed to start: {} ", msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
thread::sleep(Duration::from_secs(1));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
bail!("pageserver failed to start in {} seconds", RETRIES);
|
|
||||||
}
|
|
||||||
|
|
||||||
///
|
|
||||||
/// Stop the server.
|
|
||||||
///
|
|
||||||
/// If 'immediate' is true, we use SIGQUIT, killing the process immediately.
|
|
||||||
/// Otherwise we use SIGTERM, triggering a clean shutdown
|
|
||||||
///
|
|
||||||
/// If the server is not running, returns success
|
|
||||||
///
|
|
||||||
pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
|
|
||||||
let pid_file = self.pid_file();
|
|
||||||
if !pid_file.exists() {
|
|
||||||
println!("Pageserver is already stopped");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
let pid = Pid::from_raw(read_pidfile(&pid_file)?);
|
|
||||||
|
|
||||||
let sig = if immediate {
|
|
||||||
print!("Stopping pageserver immediately..");
|
|
||||||
Signal::SIGQUIT
|
|
||||||
} else {
|
|
||||||
print!("Stopping pageserver gracefully..");
|
|
||||||
Signal::SIGTERM
|
|
||||||
};
|
|
||||||
io::stdout().flush().unwrap();
|
|
||||||
match kill(pid, sig) {
|
|
||||||
Ok(_) => (),
|
|
||||||
Err(Errno::ESRCH) => {
|
|
||||||
println!(
|
|
||||||
"Pageserver with pid {} does not exist, but a PID file was found",
|
|
||||||
pid
|
|
||||||
);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
Err(err) => bail!(
|
|
||||||
"Failed to send signal to pageserver with pid {}: {}",
|
|
||||||
pid,
|
|
||||||
err.desc()
|
|
||||||
),
|
|
||||||
}
|
|
||||||
|
|
||||||
let address = connection_address(&self.pg_connection_config);
|
|
||||||
|
|
||||||
// TODO Remove this "timeout" and handle it on caller side instead.
|
|
||||||
// Shutting down may take a long time,
|
|
||||||
// if pageserver checkpoints a lot of data
|
|
||||||
let mut tcp_stopped = false;
|
|
||||||
for _ in 0..100 {
|
|
||||||
if !tcp_stopped {
|
|
||||||
if let Err(err) = TcpStream::connect(&address) {
|
|
||||||
tcp_stopped = true;
|
|
||||||
if err.kind() != io::ErrorKind::ConnectionRefused {
|
|
||||||
eprintln!("\nPageserver connection failed with error: {err}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tcp_stopped {
|
|
||||||
// Also check status on the HTTP port
|
|
||||||
|
|
||||||
match self.check_status() {
|
|
||||||
Err(PageserverHttpError::Transport(err)) if err.is_connect() => {
|
|
||||||
println!("done!");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
eprintln!("\nPageserver status check failed with error: {err}");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
Ok(()) => {
|
|
||||||
// keep waiting
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
print!(".");
|
|
||||||
io::stdout().flush().unwrap();
|
|
||||||
thread::sleep(Duration::from_secs(1));
|
|
||||||
}
|
|
||||||
|
|
||||||
bail!("Failed to stop pageserver with pid {}", pid);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn page_server_psql(&self, sql: &str) -> Vec<postgres::SimpleQueryMessage> {
|
|
||||||
let mut client = self.pg_connection_config.connect(NoTls).unwrap();
|
|
||||||
|
|
||||||
println!("Pageserver query: '{}'", sql);
|
|
||||||
client.simple_query(sql).unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn page_server_psql_client(&self) -> result::Result<postgres::Client, postgres::Error> {
|
|
||||||
self.pg_connection_config.connect(NoTls)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
|
|
||||||
let mut builder = self.http_client.request(method, url);
|
|
||||||
if self.env.pageserver.auth_type == AuthType::ZenithJWT {
|
|
||||||
builder = builder.bearer_auth(&self.env.pageserver.auth_token)
|
|
||||||
}
|
|
||||||
builder
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn check_status(&self) -> Result<()> {
|
|
||||||
self.http_request(Method::GET, format!("{}/status", self.http_base_url))
|
|
||||||
.send()?
|
|
||||||
.error_from_body()?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn tenant_list(&self) -> Result<Vec<TenantInfo>> {
|
|
||||||
Ok(self
|
|
||||||
.http_request(Method::GET, format!("{}/tenant", self.http_base_url))
|
|
||||||
.send()?
|
|
||||||
.error_from_body()?
|
|
||||||
.json()?)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn tenant_create(
|
|
||||||
&self,
|
|
||||||
new_tenant_id: Option<ZTenantId>,
|
|
||||||
settings: HashMap<&str, &str>,
|
|
||||||
) -> anyhow::Result<Option<ZTenantId>> {
|
|
||||||
let tenant_id_string = self
|
|
||||||
.http_request(Method::POST, format!("{}/tenant", self.http_base_url))
|
|
||||||
.json(&TenantCreateRequest {
|
|
||||||
new_tenant_id,
|
|
||||||
checkpoint_distance: settings
|
|
||||||
.get("checkpoint_distance")
|
|
||||||
.map(|x| x.parse::<u64>())
|
|
||||||
.transpose()?,
|
|
||||||
compaction_target_size: settings
|
|
||||||
.get("compaction_target_size")
|
|
||||||
.map(|x| x.parse::<u64>())
|
|
||||||
.transpose()?,
|
|
||||||
compaction_period: settings.get("compaction_period").map(|x| x.to_string()),
|
|
||||||
compaction_threshold: settings
|
|
||||||
.get("compaction_threshold")
|
|
||||||
.map(|x| x.parse::<usize>())
|
|
||||||
.transpose()?,
|
|
||||||
gc_horizon: settings
|
|
||||||
.get("gc_horizon")
|
|
||||||
.map(|x| x.parse::<u64>())
|
|
||||||
.transpose()?,
|
|
||||||
gc_period: settings.get("gc_period").map(|x| x.to_string()),
|
|
||||||
image_creation_threshold: settings
|
|
||||||
.get("image_creation_threshold")
|
|
||||||
.map(|x| x.parse::<usize>())
|
|
||||||
.transpose()?,
|
|
||||||
pitr_interval: settings.get("pitr_interval").map(|x| x.to_string()),
|
|
||||||
walreceiver_connect_timeout: settings
|
|
||||||
.get("walreceiver_connect_timeout")
|
|
||||||
.map(|x| x.to_string()),
|
|
||||||
lagging_wal_timeout: settings.get("lagging_wal_timeout").map(|x| x.to_string()),
|
|
||||||
max_lsn_wal_lag: settings
|
|
||||||
.get("max_lsn_wal_lag")
|
|
||||||
.map(|x| x.parse::<NonZeroU64>())
|
|
||||||
.transpose()
|
|
||||||
.context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
|
|
||||||
})
|
|
||||||
.send()?
|
|
||||||
.error_from_body()?
|
|
||||||
.json::<Option<String>>()?;
|
|
||||||
|
|
||||||
tenant_id_string
|
|
||||||
.map(|id| {
|
|
||||||
id.parse().with_context(|| {
|
|
||||||
format!(
|
|
||||||
"Failed to parse tennat creation response as tenant id: {}",
|
|
||||||
id
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.transpose()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn tenant_config(&self, tenant_id: ZTenantId, settings: HashMap<&str, &str>) -> Result<()> {
|
|
||||||
self.http_request(Method::PUT, format!("{}/tenant/config", self.http_base_url))
|
|
||||||
.json(&TenantConfigRequest {
|
|
||||||
tenant_id,
|
|
||||||
checkpoint_distance: settings
|
|
||||||
.get("checkpoint_distance")
|
|
||||||
.map(|x| x.parse::<u64>())
|
|
||||||
.transpose()
|
|
||||||
.context("Failed to parse 'checkpoint_distance' as an integer")?,
|
|
||||||
compaction_target_size: settings
|
|
||||||
.get("compaction_target_size")
|
|
||||||
.map(|x| x.parse::<u64>())
|
|
||||||
.transpose()
|
|
||||||
.context("Failed to parse 'compaction_target_size' as an integer")?,
|
|
||||||
compaction_period: settings.get("compaction_period").map(|x| x.to_string()),
|
|
||||||
compaction_threshold: settings
|
|
||||||
.get("compaction_threshold")
|
|
||||||
.map(|x| x.parse::<usize>())
|
|
||||||
.transpose()
|
|
||||||
.context("Failed to parse 'compaction_threshold' as an integer")?,
|
|
||||||
gc_horizon: settings
|
|
||||||
.get("gc_horizon")
|
|
||||||
.map(|x| x.parse::<u64>())
|
|
||||||
.transpose()
|
|
||||||
.context("Failed to parse 'gc_horizon' as an integer")?,
|
|
||||||
gc_period: settings.get("gc_period").map(|x| x.to_string()),
|
|
||||||
image_creation_threshold: settings
|
|
||||||
.get("image_creation_threshold")
|
|
||||||
.map(|x| x.parse::<usize>())
|
|
||||||
.transpose()
|
|
||||||
.context("Failed to parse 'image_creation_threshold' as non zero integer")?,
|
|
||||||
pitr_interval: settings.get("pitr_interval").map(|x| x.to_string()),
|
|
||||||
walreceiver_connect_timeout: settings
|
|
||||||
.get("walreceiver_connect_timeout")
|
|
||||||
.map(|x| x.to_string()),
|
|
||||||
lagging_wal_timeout: settings.get("lagging_wal_timeout").map(|x| x.to_string()),
|
|
||||||
max_lsn_wal_lag: settings
|
|
||||||
.get("max_lsn_wal_lag")
|
|
||||||
.map(|x| x.parse::<NonZeroU64>())
|
|
||||||
.transpose()
|
|
||||||
.context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
|
|
||||||
})
|
|
||||||
.send()?
|
|
||||||
.error_from_body()?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn timeline_list(&self, tenant_id: &ZTenantId) -> anyhow::Result<Vec<TimelineInfo>> {
|
|
||||||
let timeline_infos: Vec<TimelineInfo> = self
|
|
||||||
.http_request(
|
|
||||||
Method::GET,
|
|
||||||
format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
|
|
||||||
)
|
|
||||||
.send()?
|
|
||||||
.error_from_body()?
|
|
||||||
.json()?;
|
|
||||||
|
|
||||||
Ok(timeline_infos)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn timeline_create(
|
|
||||||
&self,
|
|
||||||
tenant_id: ZTenantId,
|
|
||||||
new_timeline_id: Option<ZTimelineId>,
|
|
||||||
ancestor_start_lsn: Option<Lsn>,
|
|
||||||
ancestor_timeline_id: Option<ZTimelineId>,
|
|
||||||
) -> anyhow::Result<Option<TimelineInfo>> {
|
|
||||||
let timeline_info_response = self
|
|
||||||
.http_request(
|
|
||||||
Method::POST,
|
|
||||||
format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
|
|
||||||
)
|
|
||||||
.json(&TimelineCreateRequest {
|
|
||||||
new_timeline_id,
|
|
||||||
ancestor_start_lsn,
|
|
||||||
ancestor_timeline_id,
|
|
||||||
})
|
|
||||||
.send()?
|
|
||||||
.error_from_body()?
|
|
||||||
.json::<Option<TimelineInfo>>()?;
|
|
||||||
|
|
||||||
Ok(timeline_info_response)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Import a basebackup prepared using either:
|
|
||||||
/// a) `pg_basebackup -F tar`, or
|
|
||||||
/// b) The `fullbackup` pageserver endpoint
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
/// * `tenant_id` - tenant to import into. Created if not exists
|
|
||||||
/// * `timeline_id` - id to assign to imported timeline
|
|
||||||
/// * `base` - (start lsn of basebackup, path to `base.tar` file)
|
|
||||||
/// * `pg_wal` - if there's any wal to import: (end lsn, path to `pg_wal.tar`)
|
|
||||||
pub fn timeline_import(
|
|
||||||
&self,
|
|
||||||
tenant_id: ZTenantId,
|
|
||||||
timeline_id: ZTimelineId,
|
|
||||||
base: (Lsn, PathBuf),
|
|
||||||
pg_wal: Option<(Lsn, PathBuf)>,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
let mut client = self.pg_connection_config.connect(NoTls).unwrap();
|
|
||||||
|
|
||||||
// Init base reader
|
|
||||||
let (start_lsn, base_tarfile_path) = base;
|
|
||||||
let base_tarfile = File::open(base_tarfile_path)?;
|
|
||||||
let mut base_reader = BufReader::new(base_tarfile);
|
|
||||||
|
|
||||||
// Init wal reader if necessary
|
|
||||||
let (end_lsn, wal_reader) = if let Some((end_lsn, wal_tarfile_path)) = pg_wal {
|
|
||||||
let wal_tarfile = File::open(wal_tarfile_path)?;
|
|
||||||
let wal_reader = BufReader::new(wal_tarfile);
|
|
||||||
(end_lsn, Some(wal_reader))
|
|
||||||
} else {
|
|
||||||
(start_lsn, None)
|
|
||||||
};
|
|
||||||
|
|
||||||
// Import base
|
|
||||||
let import_cmd =
|
|
||||||
format!("import basebackup {tenant_id} {timeline_id} {start_lsn} {end_lsn}");
|
|
||||||
let mut writer = client.copy_in(&import_cmd)?;
|
|
||||||
io::copy(&mut base_reader, &mut writer)?;
|
|
||||||
writer.finish()?;
|
|
||||||
|
|
||||||
// Import wal if necessary
|
|
||||||
if let Some(mut wal_reader) = wal_reader {
|
|
||||||
let import_cmd = format!("import wal {tenant_id} {timeline_id} {start_lsn} {end_lsn}");
|
|
||||||
let mut writer = client.copy_in(&import_cmd)?;
|
|
||||||
io::copy(&mut wal_reader, &mut writer)?;
|
|
||||||
writer.finish()?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
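The `timeline_import` helper above consumes archives laid out the way `pg_basebackup -F tar` produces them (a `base.tar`, plus a `pg_wal.tar` when WAL is streamed). A minimal sketch of preparing such archives from a running compute; the host, port and role here are assumptions taken from the docker-compose setup below:

#!/bin/bash
# Produce base.tar and pg_wal.tar suitable for feeding into timeline_import.
# Connection details are assumptions (docker-compose compute defaults).
pg_basebackup -h localhost -p 55433 -U cloud_admin \
    -D /tmp/neon-basebackup -F tar -X stream
ls -l /tmp/neon-basebackup   # expect base.tar and pg_wal.tar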
docker-compose/compute_wrapper/Dockerfile (new file, 13 lines)
@@ -0,0 +1,13 @@
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
ARG COMPUTE_IMAGE=compute-node-v14
ARG TAG=latest

FROM $REPOSITORY/${COMPUTE_IMAGE}:$TAG

USER root
RUN apt-get update && \
    apt-get install -y curl \
    jq \
    netcat

USER postgres
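For building this wrapper image outside of docker-compose, the same build args can be passed on the command line; a sketch, assuming the publicly available `neondatabase` repository rather than the ECR default above, and an arbitrary local tag:

docker build \
    --build-arg REPOSITORY=neondatabase \
    --build-arg COMPUTE_IMAGE=compute-node-v14 \
    --build-arg TAG=latest \
    -t compute-wrapper:latest \
    docker-compose/compute_wrapper/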
docker-compose/compute_wrapper/shell/compute.sh (new executable file, 48 lines)
@@ -0,0 +1,48 @@
#!/bin/bash
set -eux

PG_VERSION=${PG_VERSION:-14}

SPEC_FILE_ORG=/var/db/postgres/specs/spec.json
SPEC_FILE=/tmp/spec.json

echo "Waiting for pageserver to become ready."
while ! nc -z pageserver 6400; do
    sleep 1;
done
echo "Pageserver is ready."

echo "Create a tenant and timeline"
PARAMS=(
    -sb
    -X POST
    -H "Content-Type: application/json"
    -d "{}"
    http://pageserver:9898/v1/tenant/
)
tenant_id=$(curl "${PARAMS[@]}" | sed 's/"//g')

PARAMS=(
    -sb
    -X POST
    -H "Content-Type: application/json"
    -d "{\"tenant_id\":\"${tenant_id}\", \"pg_version\": ${PG_VERSION}}"
    "http://pageserver:9898/v1/tenant/${tenant_id}/timeline/"
)
result=$(curl "${PARAMS[@]}")
echo $result | jq .

echo "Overwrite tenant id and timeline id in spec file"
tenant_id=$(echo ${result} | jq -r .tenant_id)
timeline_id=$(echo ${result} | jq -r .timeline_id)

sed "s/TENANT_ID/${tenant_id}/" ${SPEC_FILE_ORG} > ${SPEC_FILE}
sed -i "s/TIMELINE_ID/${timeline_id}/" ${SPEC_FILE}

cat ${SPEC_FILE}

echo "Start compute node"
/usr/local/bin/compute_ctl --pgdata /var/db/postgres/compute \
    -C "postgresql://cloud_admin@localhost:55433/postgres" \
    -b /usr/local/bin/postgres \
    -S ${SPEC_FILE}
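Once `compute_ctl` has brought the compute up, it accepts ordinary Postgres connections on the port from the spec file; a quick smoke test, using the same host port and role that the docker-compose test below relies on:

# Connect to the compute started by compute.sh and run a trivial query.
psql -h localhost -p 55433 -U cloud_admin -d postgres -c "SELECT 1;"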
docker-compose/compute_wrapper/var/db/postgres/specs/spec.json (new file, 141 lines)
@@ -0,0 +1,141 @@
|
|||||||
|
{
|
||||||
|
"format_version": 1.0,
|
||||||
|
|
||||||
|
"timestamp": "2022-10-12T18:00:00.000Z",
|
||||||
|
"operation_uuid": "0f657b36-4b0f-4a2d-9c2e-1dcd615e7d8c",
|
||||||
|
|
||||||
|
"cluster": {
|
||||||
|
"cluster_id": "docker_compose",
|
||||||
|
"name": "docker_compose_test",
|
||||||
|
"state": "restarted",
|
||||||
|
"roles": [
|
||||||
|
{
|
||||||
|
"name": "cloud_admin",
|
||||||
|
"encrypted_password": "b093c0d3b281ba6da1eacc608620abd8",
|
||||||
|
"options": null
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"databases": [
|
||||||
|
],
|
||||||
|
"settings": [
|
||||||
|
{
|
||||||
|
"name": "fsync",
|
||||||
|
"value": "off",
|
||||||
|
"vartype": "bool"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "wal_level",
|
||||||
|
"value": "replica",
|
||||||
|
"vartype": "enum"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "hot_standby",
|
||||||
|
"value": "on",
|
||||||
|
"vartype": "bool"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "wal_log_hints",
|
||||||
|
"value": "on",
|
||||||
|
"vartype": "bool"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "log_connections",
|
||||||
|
"value": "on",
|
||||||
|
"vartype": "bool"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "port",
|
||||||
|
"value": "55433",
|
||||||
|
"vartype": "integer"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "shared_buffers",
|
||||||
|
"value": "1MB",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "max_connections",
|
||||||
|
"value": "100",
|
||||||
|
"vartype": "integer"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "listen_addresses",
|
||||||
|
"value": "0.0.0.0",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "max_wal_senders",
|
||||||
|
"value": "10",
|
||||||
|
"vartype": "integer"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "max_replication_slots",
|
||||||
|
"value": "10",
|
||||||
|
"vartype": "integer"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "wal_sender_timeout",
|
||||||
|
"value": "5s",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "wal_keep_size",
|
||||||
|
"value": "0",
|
||||||
|
"vartype": "integer"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "password_encryption",
|
||||||
|
"value": "md5",
|
||||||
|
"vartype": "enum"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "restart_after_crash",
|
||||||
|
"value": "off",
|
||||||
|
"vartype": "bool"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "synchronous_standby_names",
|
||||||
|
"value": "walproposer",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "shared_preload_libraries",
|
||||||
|
"value": "neon",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "neon.safekeepers",
|
||||||
|
"value": "safekeeper1:5454,safekeeper2:5454,safekeeper3:5454",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "neon.timeline_id",
|
||||||
|
"value": "TIMELINE_ID",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "neon.tenant_id",
|
||||||
|
"value": "TENANT_ID",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "neon.pageserver_connstring",
|
||||||
|
"value": "host=pageserver port=6400",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "max_replication_write_lag",
|
||||||
|
"value": "500MB",
|
||||||
|
"vartype": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "max_replication_flush_lag",
|
||||||
|
"value": "10GB",
|
||||||
|
"vartype": "string"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
|
||||||
|
"delta_operations": [
|
||||||
|
]
|
||||||
|
}
|
||||||
docker-compose/docker-compose.yml (new file, 209 lines)
@@ -0,0 +1,209 @@
|
|||||||
|
version: '3'
|
||||||
|
|
||||||
|
services:
|
||||||
|
etcd:
|
||||||
|
restart: always
|
||||||
|
image: quay.io/coreos/etcd:v3.5.4
|
||||||
|
ports:
|
||||||
|
- 2379:2379
|
||||||
|
- 2380:2380
|
||||||
|
environment:
|
||||||
|
# This significantly speeds up etcd, and we don't need data persistency there anyway.
|
||||||
|
ETCD_UNSAFE_NO_FSYNC: "1"
|
||||||
|
command:
|
||||||
|
- "etcd"
|
||||||
|
- "--auto-compaction-mode=revision"
|
||||||
|
- "--auto-compaction-retention=1"
|
||||||
|
- "--name=etcd-cluster"
|
||||||
|
- "--initial-cluster-state=new"
|
||||||
|
- "--initial-cluster-token=etcd-cluster-1"
|
||||||
|
- "--initial-cluster=etcd-cluster=http://etcd:2380"
|
||||||
|
- "--initial-advertise-peer-urls=http://etcd:2380"
|
||||||
|
- "--advertise-client-urls=http://etcd:2379"
|
||||||
|
- "--listen-client-urls=http://0.0.0.0:2379"
|
||||||
|
- "--listen-peer-urls=http://0.0.0.0:2380"
|
||||||
|
- "--quota-backend-bytes=134217728" # 128 MB
|
||||||
|
|
||||||
|
minio:
|
||||||
|
restart: always
|
||||||
|
image: quay.io/minio/minio:RELEASE.2022-10-20T00-55-09Z
|
||||||
|
ports:
|
||||||
|
- 9000:9000
|
||||||
|
- 9001:9001
|
||||||
|
environment:
|
||||||
|
- MINIO_ROOT_USER=minio
|
||||||
|
- MINIO_ROOT_PASSWORD=password
|
||||||
|
command: server /data --address :9000 --console-address ":9001"
|
||||||
|
|
||||||
|
minio_create_buckets:
|
||||||
|
image: minio/mc
|
||||||
|
environment:
|
||||||
|
- MINIO_ROOT_USER=minio
|
||||||
|
- MINIO_ROOT_PASSWORD=password
|
||||||
|
entrypoint:
|
||||||
|
- "/bin/sh"
|
||||||
|
- "-c"
|
||||||
|
command:
|
||||||
|
- "until (/usr/bin/mc alias set minio http://minio:9000 $$MINIO_ROOT_USER $$MINIO_ROOT_PASSWORD) do
|
||||||
|
echo 'Waiting to start minio...' && sleep 1;
|
||||||
|
done;
|
||||||
|
/usr/bin/mc mb minio/neon --region=eu-north-1;
|
||||||
|
exit 0;"
|
||||||
|
depends_on:
|
||||||
|
- minio
|
||||||
|
|
||||||
|
pageserver:
|
||||||
|
restart: always
|
||||||
|
image: ${REPOSITORY:-neondatabase}/neon:${TAG:-latest}
|
||||||
|
environment:
|
||||||
|
- BROKER_ENDPOINT='http://etcd:2379'
|
||||||
|
- AWS_ACCESS_KEY_ID=minio
|
||||||
|
- AWS_SECRET_ACCESS_KEY=password
|
||||||
|
#- RUST_BACKTRACE=1
|
||||||
|
ports:
|
||||||
|
#- 6400:6400 # pg protocol handler
|
||||||
|
- 9898:9898 # http endpoints
|
||||||
|
entrypoint:
|
||||||
|
- "/bin/sh"
|
||||||
|
- "-c"
|
||||||
|
command:
|
||||||
|
- "/usr/local/bin/pageserver -D /data/.neon/
|
||||||
|
-c \"broker_endpoints=[$$BROKER_ENDPOINT]\"
|
||||||
|
-c \"listen_pg_addr='0.0.0.0:6400'\"
|
||||||
|
-c \"listen_http_addr='0.0.0.0:9898'\"
|
||||||
|
-c \"remote_storage={endpoint='http://minio:9000',
|
||||||
|
bucket_name='neon',
|
||||||
|
bucket_region='eu-north-1',
|
||||||
|
prefix_in_bucket='/pageserver/'}\""
|
||||||
|
depends_on:
|
||||||
|
- etcd
|
||||||
|
- minio_create_buckets
|
||||||
|
|
||||||
|
safekeeper1:
|
||||||
|
restart: always
|
||||||
|
image: ${REPOSITORY:-neondatabase}/neon:${TAG:-latest}
|
||||||
|
environment:
|
||||||
|
- SAFEKEEPER_ADVERTISE_URL=safekeeper1:5454
|
||||||
|
- SAFEKEEPER_ID=1
|
||||||
|
- BROKER_ENDPOINT=http://etcd:2379
|
||||||
|
- AWS_ACCESS_KEY_ID=minio
|
||||||
|
- AWS_SECRET_ACCESS_KEY=password
|
||||||
|
#- RUST_BACKTRACE=1
|
||||||
|
ports:
|
||||||
|
#- 5454:5454 # pg protocol handler
|
||||||
|
- 7676:7676 # http endpoints
|
||||||
|
entrypoint:
|
||||||
|
- "/bin/sh"
|
||||||
|
- "-c"
|
||||||
|
command:
|
||||||
|
- "safekeeper --listen-pg=$$SAFEKEEPER_ADVERTISE_URL
|
||||||
|
--listen-http='0.0.0.0:7676'
|
||||||
|
--id=$$SAFEKEEPER_ID
|
||||||
|
--broker-endpoints=$$BROKER_ENDPOINT
|
||||||
|
-D /data
|
||||||
|
--remote-storage=\"{endpoint='http://minio:9000',
|
||||||
|
bucket_name='neon',
|
||||||
|
bucket_region='eu-north-1',
|
||||||
|
prefix_in_bucket='/safekeeper/'}\""
|
||||||
|
depends_on:
|
||||||
|
- etcd
|
||||||
|
- minio_create_buckets
|
||||||
|
|
||||||
|
safekeeper2:
|
||||||
|
restart: always
|
||||||
|
image: ${REPOSITORY:-neondatabase}/neon:${TAG:-latest}
|
||||||
|
environment:
|
||||||
|
- SAFEKEEPER_ADVERTISE_URL=safekeeper2:5454
|
||||||
|
- SAFEKEEPER_ID=2
|
||||||
|
- BROKER_ENDPOINT=http://etcd:2379
|
||||||
|
- AWS_ACCESS_KEY_ID=minio
|
||||||
|
- AWS_SECRET_ACCESS_KEY=password
|
||||||
|
#- RUST_BACKTRACE=1
|
||||||
|
ports:
|
||||||
|
#- 5454:5454 # pg protocol handler
|
||||||
|
- 7677:7676 # http endpoints
|
||||||
|
entrypoint:
|
||||||
|
- "/bin/sh"
|
||||||
|
- "-c"
|
||||||
|
command:
|
||||||
|
- "safekeeper --listen-pg=$$SAFEKEEPER_ADVERTISE_URL
|
||||||
|
--listen-http='0.0.0.0:7676'
|
||||||
|
--id=$$SAFEKEEPER_ID
|
||||||
|
--broker-endpoints=$$BROKER_ENDPOINT
|
||||||
|
-D /data
|
||||||
|
--remote-storage=\"{endpoint='http://minio:9000',
|
||||||
|
bucket_name='neon',
|
||||||
|
bucket_region='eu-north-1',
|
||||||
|
prefix_in_bucket='/safekeeper/'}\""
|
||||||
|
depends_on:
|
||||||
|
- etcd
|
||||||
|
- minio_create_buckets
|
||||||
|
|
||||||
|
safekeeper3:
|
||||||
|
restart: always
|
||||||
|
image: ${REPOSITORY:-neondatabase}/neon:${TAG:-latest}
|
||||||
|
environment:
|
||||||
|
- SAFEKEEPER_ADVERTISE_URL=safekeeper3:5454
|
||||||
|
- SAFEKEEPER_ID=3
|
||||||
|
- BROKER_ENDPOINT=http://etcd:2379
|
||||||
|
- AWS_ACCESS_KEY_ID=minio
|
||||||
|
- AWS_SECRET_ACCESS_KEY=password
|
||||||
|
#- RUST_BACKTRACE=1
|
||||||
|
ports:
|
||||||
|
#- 5454:5454 # pg protocol handler
|
||||||
|
- 7678:7676 # http endpoints
|
||||||
|
entrypoint:
|
||||||
|
- "/bin/sh"
|
||||||
|
- "-c"
|
||||||
|
command:
|
||||||
|
- "safekeeper --listen-pg=$$SAFEKEEPER_ADVERTISE_URL
|
||||||
|
--listen-http='0.0.0.0:7676'
|
||||||
|
--id=$$SAFEKEEPER_ID
|
||||||
|
--broker-endpoints=$$BROKER_ENDPOINT
|
||||||
|
-D /data
|
||||||
|
--remote-storage=\"{endpoint='http://minio:9000',
|
||||||
|
bucket_name='neon',
|
||||||
|
bucket_region='eu-north-1',
|
||||||
|
prefix_in_bucket='/safekeeper/'}\""
|
||||||
|
depends_on:
|
||||||
|
- etcd
|
||||||
|
- minio_create_buckets
|
||||||
|
|
||||||
|
compute:
|
||||||
|
restart: always
|
||||||
|
build:
|
||||||
|
context: ./compute_wrapper/
|
||||||
|
args:
|
||||||
|
- COMPUTE_IMAGE=compute-node-v${PG_VERSION:-14}
|
||||||
|
- TAG=${TAG:-latest}
|
||||||
|
- http_proxy=$http_proxy
|
||||||
|
- https_proxy=$https_proxy
|
||||||
|
environment:
|
||||||
|
- PG_VERSION=${PG_VERSION:-14}
|
||||||
|
#- RUST_BACKTRACE=1
|
||||||
|
# Mount the test files directly, for faster editing cycle.
|
||||||
|
volumes:
|
||||||
|
- ./compute_wrapper/var/db/postgres/specs/:/var/db/postgres/specs/
|
||||||
|
- ./compute_wrapper/shell/:/shell/
|
||||||
|
ports:
|
||||||
|
- 55433:55433 # pg protocol handler
|
||||||
|
- 3080:3080 # http endpoints
|
||||||
|
entrypoint:
|
||||||
|
- "/shell/compute.sh"
|
||||||
|
depends_on:
|
||||||
|
- safekeeper1
|
||||||
|
- safekeeper2
|
||||||
|
- safekeeper3
|
||||||
|
- pageserver
|
||||||
|
|
||||||
|
compute_is_ready:
|
||||||
|
image: postgres:latest
|
||||||
|
entrypoint:
|
||||||
|
- "/bin/bash"
|
||||||
|
- "-c"
|
||||||
|
command:
|
||||||
|
- "until pg_isready -h compute -p 55433 ; do
|
||||||
|
echo 'Waiting to start compute...' && sleep 1;
|
||||||
|
done"
|
||||||
|
depends_on:
|
||||||
|
- compute
|
||||||
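The compose file can also be brought up by hand, without the test script; a minimal sketch, where PG_VERSION, REPOSITORY and TAG fall back to the defaults declared above when unset:

cd docker-compose
# Start etcd, minio, pageserver, the three safekeepers and the compute.
PG_VERSION=14 docker compose -f docker-compose.yml up --build -d
# Tear everything down again when finished.
docker compose -f docker-compose.yml down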
docker-compose/docker_compose_test.sh (new executable file, 60 lines)
@@ -0,0 +1,60 @@
#!/bin/bash

# A basic test to ensure Docker images are built correctly.
# Builds a wrapper around the compute, starts all services and runs a simple SQL query.
# Repeats the process for all currently supported Postgres versions.

# Implicitly accepts `REPOSITORY` and `TAG` env vars that are passed into the compose file.
# Their defaults point at the DockerHub `neondatabase/neon:latest` image,
# to verify custom image builds (e.g. pre-published ones).

# XXX: Currently does not work on M1 Macs, because only x86_64 Docker images are built and the M1 Docker emulation layer lacks seccomp support.

set -eux -o pipefail

SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
COMPOSE_FILE=$SCRIPT_DIR/docker-compose.yml

COMPUTE_CONTAINER_NAME=docker-compose-compute-1
SQL="CREATE TABLE t(key int primary key, value text); insert into t values(1,1); select * from t;"
PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -c '$SQL' postgres"

cleanup() {
    echo "show container information"
    docker ps
    docker compose -f $COMPOSE_FILE logs
    echo "stop containers..."
    docker compose -f $COMPOSE_FILE down
}

echo "clean up containers if they exist"
cleanup

for pg_version in 14 15; do
    echo "start containers (pg_version=$pg_version)."
    PG_VERSION=$pg_version docker compose -f $COMPOSE_FILE up --build -d

    echo "wait until the compute is ready. timeout after 60s."
    cnt=0
    while sleep 1; do
        # check timeout
        cnt=`expr $cnt + 1`
        if [ $cnt -gt 60 ]; then
            echo "timeout before the compute is ready."
            cleanup
            exit 1
        fi

        # check if the compute is ready
        set +o pipefail
        result=`docker compose -f $COMPOSE_FILE logs "compute_is_ready" | grep "accepting connections" | wc -l`
        set -o pipefail
        if [ $result -eq 1 ]; then
            echo "OK. The compute is ready to connect."
            echo "execute simple queries."
            docker exec $COMPUTE_CONTAINER_NAME /bin/bash -c "psql $PSQL_OPTION"
            cleanup
            break
        fi
    done
done
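As the header comments note, the test honours `REPOSITORY` and `TAG` from the environment; a sketch of running it against a custom image build (the registry and tag here are placeholders):

# Run the compose smoke test against a custom image instead of the
# default neondatabase/neon:latest.
REPOSITORY=my-registry.example.com/neon TAG=my-build ./docker-compose/docker_compose_test.sh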
@@ -1,20 +0,0 @@
#!/bin/sh
set -eux

broker_endpoints_param="${BROKER_ENDPOINT:-absent}"
if [ "$broker_endpoints_param" != "absent" ]; then
    broker_endpoints_param="-c broker_endpoints=['$broker_endpoints_param']"
else
    broker_endpoints_param=''
fi

if [ "$1" = 'pageserver' ]; then
    if [ ! -d "/data/tenants" ]; then
        echo "Initializing pageserver data directory"
        pageserver --init -D /data -c "pg_distrib_dir='/usr/local'" -c "id=10" $broker_endpoints_param
    fi
    echo "Starting pageserver at 0.0.0.0:6400"
    pageserver -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" $broker_endpoints_param -D /data
else
    "$@"
fi
docs/.gitignore (new file, 1 line)
@@ -0,0 +1 @@
book
@@ -1,14 +0,0 @@
# Zenith documentation

## Table of contents

- [authentication.md](authentication.md) — pageserver JWT authentication.
- [docker.md](docker.md) — Docker images and building pipeline.
- [glossary.md](glossary.md) — Glossary of all the terms used in codebase.
- [multitenancy.md](multitenancy.md) — how multitenancy is organized in the pageserver and Zenith CLI.
- [sourcetree.md](sourcetree.md) — Overview of the source tree layout.
- [pageserver/README.md](/pageserver/README.md) — pageserver overview.
- [postgres_ffi/README.md](/libs/postgres_ffi/README.md) — Postgres FFI overview.
- [test_runner/README.md](/test_runner/README.md) — tests infrastructure overview.
- [safekeeper/README.md](/safekeeper/README.md) — WAL service overview.
- [core_changes.md](core_changes.md) - Description of Zenith changes in Postgres core
Some files were not shown because too many files have changed in this diff.