Mirror of https://github.com/neondatabase/neon.git
Synced 2026-02-20 11:00:38 +00:00

Compare commits: layer_map_… ... pg-extensi… (602 commits)
[Commit table: 602 commits, identified only by abbreviated SHA1; the Author and Date columns were not captured in this snapshot.]
@@ -14,3 +14,4 @@ opt-level = 1
 
 [alias]
 build_testing = ["build", "--features", "testing"]
+neon = ["run", "--bin", "neon_local"]
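The new alias makes the local control binary reachable straight through Cargo. A minimal shell sketch of how it is used (the trailing argument is illustrative, not part of this diff):

```bash
# With the [alias] entry above, `cargo neon` expands to `cargo run --bin neon_local`,
# so arguments after `--` are forwarded to neon_local itself.
cargo neon -- --help   # equivalent to: cargo run --bin neon_local -- --help
```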
@@ -4,7 +4,7 @@
 hakari-package = "workspace_hack"
 
 # Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above.
-dep-format-version = "3"
+dep-format-version = "4"
 
 # Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended.
 # Hakari works much better with the new feature resolver.
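Bumping `dep-format-version` to "4" changes how the generated `workspace-hack = ...` lines are written, so the checked-in files have to be regenerated with a recent cargo-hakari. A hedged sketch of the local workflow (the commands are cargo-hakari's standard CLI, not taken from this diff):

```bash
# Install or upgrade the tool (the comment above requires 0.9.8+).
cargo install cargo-hakari --locked

# Regenerate workspace_hack's Cargo.toml and the per-crate workspace-hack dependency lines.
cargo hakari generate
cargo hakari manage-deps

# Confirm everything is up to date (useful as a CI check).
cargo hakari verify
```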
@@ -15,8 +15,10 @@
 !proxy/
 !safekeeper/
 !storage_broker/
+!trace/
 !vendor/postgres-v14/
 !vendor/postgres-v15/
 !workspace_hack/
 !neon_local/
 !scripts/ninstall.sh
+!vm-cgconfig.conf
1 .github/PULL_REQUEST_TEMPLATE/release-pr.md vendored
@@ -10,6 +10,7 @@
 <!-- List everything that should be done **before** release, any issues / setting changes / etc -->
 
 ### Checklist after release
+- [ ] Make sure instructions from PRs included in this release and labeled `manual_release_instructions` are executed (either by you or by people who wrote them).
 - [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/219/files))
 - [ ] Check [#dev-production-stream](https://neondb.slack.com/archives/C03F5SM1N02) Slack channel
 - [ ] Check [stuck projects page](https://console.neon.tech/admin/projects?sort=last_active&order=desc&stuck=true)
186 .github/actions/allure-report-generate/action.yml vendored Normal file
@@ -0,0 +1,186 @@
name: 'Create Allure report'
description: 'Generate Allure report from uploaded by actions/allure-report-store tests results'

outputs:
  report-url:
    description: 'Allure report URL'
    value: ${{ steps.generate-report.outputs.report-url }}
  report-json-url:
    description: 'Allure report JSON URL'
    value: ${{ steps.generate-report.outputs.report-json-url }}

runs:
  using: "composite"

  steps:
    # We're using some of env variables quite offen, so let's set them once.
    #
    # It would be nice to have them set in common runs.env[0] section, but it doesn't work[1]
    #
    # - [0] https://docs.github.com/en/actions/creating-actions/metadata-syntax-for-github-actions#runsenv
    # - [1] https://github.com/neondatabase/neon/pull/3907#discussion_r1154703456
    #
    - name: Set variables
      shell: bash -euxo pipefail {0}
      run: |
        PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
        if [ "${PR_NUMBER}" != "null" ]; then
          BRANCH_OR_PR=pr-${PR_NUMBER}
        elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ]; then
          # Shortcut for special branches
          BRANCH_OR_PR=${GITHUB_REF_NAME}
        else
          BRANCH_OR_PR=branch-$(printf "${GITHUB_REF_NAME}" | tr -c "[:alnum:]._-" "-")
        fi

        LOCK_FILE=reports/${BRANCH_OR_PR}/lock.txt

        WORKDIR=/tmp/${BRANCH_OR_PR}-$(date +%s)
        mkdir -p ${WORKDIR}

        echo "BRANCH_OR_PR=${BRANCH_OR_PR}" >> $GITHUB_ENV
        echo "LOCK_FILE=${LOCK_FILE}" >> $GITHUB_ENV
        echo "WORKDIR=${WORKDIR}" >> $GITHUB_ENV
        echo "BUCKET=${BUCKET}" >> $GITHUB_ENV
      env:
        BUCKET: neon-github-public-dev

    # TODO: We can replace with a special docker image with Java and Allure pre-installed
    - uses: actions/setup-java@v3
      with:
        distribution: 'temurin'
        java-version: '17'

    - name: Install Allure
      shell: bash -euxo pipefail {0}
      run: |
        if ! which allure; then
          ALLURE_ZIP=allure-${ALLURE_VERSION}.zip
          wget -q https://github.com/allure-framework/allure2/releases/download/${ALLURE_VERSION}/${ALLURE_ZIP}
          echo "${ALLURE_ZIP_SHA256} ${ALLURE_ZIP}" | sha256sum --check
          unzip -q ${ALLURE_ZIP}
          echo "$(pwd)/allure-${ALLURE_VERSION}/bin" >> $GITHUB_PATH
          rm -f ${ALLURE_ZIP}
        fi
      env:
        ALLURE_VERSION: 2.22.1
        ALLURE_ZIP_SHA256: fdc7a62d94b14c5e0bf25198ae1feded6b005fdbed864b4d3cb4e5e901720b0b

    # Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this
    - name: Acquire lock
      shell: bash -euxo pipefail {0}
      run: |
        LOCK_TIMEOUT=300 # seconds

        LOCK_CONTENT="${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}"
        echo ${LOCK_CONTENT} > ${WORKDIR}/lock.txt

        # Do it up to 5 times to avoid race condition
        for _ in $(seq 1 5); do
          for i in $(seq 1 ${LOCK_TIMEOUT}); do
            LOCK_ACQUIRED=$(aws s3api head-object --bucket neon-github-public-dev --key ${LOCK_FILE} | jq --raw-output '.LastModified' || true)
            # `date --date="..."` is supported only by gnu date (i.e. it doesn't work on BSD/macOS)
            if [ -z "${LOCK_ACQUIRED}" ] || [ "$(( $(date +%s) - $(date --date="${LOCK_ACQUIRED}" +%s) ))" -gt "${LOCK_TIMEOUT}" ]; then
              break
            fi
            sleep 1
          done

          aws s3 mv --only-show-errors ${WORKDIR}/lock.txt "s3://${BUCKET}/${LOCK_FILE}"

          # Double-check that exactly THIS run has acquired the lock
          aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
          if [ "$(cat lock.txt)" = "${LOCK_CONTENT}" ]; then
            break
          fi
        done

    - name: Generate and publish final Allure report
      id: generate-report
      shell: bash -euxo pipefail {0}
      run: |
        REPORT_PREFIX=reports/${BRANCH_OR_PR}
        RAW_PREFIX=reports-raw/${BRANCH_OR_PR}/${GITHUB_RUN_ID}

        # Get previously uploaded data for this run
        ZSTD_NBTHREADS=0

        S3_FILEPATHS=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${RAW_PREFIX}/ | jq --raw-output '.Contents[].Key')
        if [ -z "$S3_FILEPATHS" ]; then
          # There's no previously uploaded data for this $GITHUB_RUN_ID
          exit 0
        fi
        for S3_FILEPATH in ${S3_FILEPATHS}; do
          time aws s3 cp --only-show-errors "s3://${BUCKET}/${S3_FILEPATH}" "${WORKDIR}"

          archive=${WORKDIR}/$(basename $S3_FILEPATH)
          mkdir -p ${archive%.tar.zst}
          time tar -xf ${archive} -C ${archive%.tar.zst}
          rm -f ${archive}
        done

        # Get history trend
        time aws s3 cp --recursive --only-show-errors "s3://${BUCKET}/${REPORT_PREFIX}/latest/history" "${WORKDIR}/latest/history" || true

        # Generate report
        time allure generate --clean --output ${WORKDIR}/report ${WORKDIR}/*

        # Replace a logo link with a redirect to the latest version of the report
        sed -i 's|<a href="." class=|<a href="https://'${BUCKET}'.s3.amazonaws.com/'${REPORT_PREFIX}'/latest/index.html?nocache='"'+Date.now()+'"'" class=|g' ${WORKDIR}/report/app.js

        # Upload a history and the final report (in this particular order to not to have duplicated history in 2 places)
        time aws s3 mv --recursive --only-show-errors "${WORKDIR}/report/history" "s3://${BUCKET}/${REPORT_PREFIX}/latest/history"
        time aws s3 mv --recursive --only-show-errors "${WORKDIR}/report" "s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}"

        REPORT_URL=https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}/index.html

        # Generate redirect
        cat <<EOF > ${WORKDIR}/index.html
        <!DOCTYPE html>

        <meta charset="utf-8">
        <title>Redirecting to ${REPORT_URL}</title>
        <meta http-equiv="refresh" content="0; URL=${REPORT_URL}">
        EOF
        time aws s3 cp --only-show-errors ${WORKDIR}/index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"

        echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
        echo "report-json-url=${REPORT_URL%/index.html}/data/suites.json" >> $GITHUB_OUTPUT

        echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}

    - name: Release lock
      if: always()
      shell: bash -euxo pipefail {0}
      run: |
        aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0

        if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}" ]; then
          aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
        fi

    - name: Cleanup
      if: always()
      shell: bash -euxo pipefail {0}
      run: |
        if [ -d "${WORKDIR}" ]; then
          rm -rf ${WORKDIR}
        fi

    - uses: actions/github-script@v6
      if: always()
      env:
        REPORT_URL: ${{ steps.generate-report.outputs.report-url }}
        COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
      with:
        script: |
          const { REPORT_URL, COMMIT_SHA } = process.env

          await github.rest.repos.createCommitStatus({
            owner: context.repo.owner,
            repo: context.repo.repo,
            sha: `${COMMIT_SHA}`,
            state: 'success',
            target_url: `${REPORT_URL}`,
            context: 'Allure report',
          })
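For context, a composite action like this is consumed from a workflow job. A hedged sketch of a caller (the job layout and the AWS-credentials step are assumptions; only the action path and the `report-url` output come from the file above):

```yaml
# Illustrative workflow job, not part of this diff
jobs:
  create-test-report:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      # AWS credentials for the neon-github-public-dev bucket are assumed to be
      # configured earlier in the job (e.g. via OIDC or repository secrets).
      - name: Create Allure report
        id: create-allure-report
        uses: ./.github/actions/allure-report-generate

      - name: Print report URL
        run: echo "Allure report: ${{ steps.create-allure-report.outputs.report-url }}"
```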
72 .github/actions/allure-report-store/action.yml vendored Normal file
@@ -0,0 +1,72 @@
name: 'Store Allure results'
description: 'Upload test results to be used by actions/allure-report-generate'

inputs:
  report-dir:
    description: 'directory with test results generated by tests'
    required: true
  unique-key:
    description: 'string to distinguish different results in the same run'
    required: true

runs:
  using: "composite"

  steps:
    - name: Set variables
      shell: bash -euxo pipefail {0}
      run: |
        PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
        if [ "${PR_NUMBER}" != "null" ]; then
          BRANCH_OR_PR=pr-${PR_NUMBER}
        elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ]; then
          # Shortcut for special branches
          BRANCH_OR_PR=${GITHUB_REF_NAME}
        else
          BRANCH_OR_PR=branch-$(printf "${GITHUB_REF_NAME}" | tr -c "[:alnum:]._-" "-")
        fi

        echo "BRANCH_OR_PR=${BRANCH_OR_PR}" >> $GITHUB_ENV
        echo "REPORT_DIR=${REPORT_DIR}" >> $GITHUB_ENV
      env:
        REPORT_DIR: ${{ inputs.report-dir }}

    - name: Upload test results
      shell: bash -euxo pipefail {0}
      run: |
        REPORT_PREFIX=reports/${BRANCH_OR_PR}
        RAW_PREFIX=reports-raw/${BRANCH_OR_PR}/${GITHUB_RUN_ID}

        # Add metadata
        cat <<EOF > ${REPORT_DIR}/executor.json
        {
          "name": "GitHub Actions",
          "type": "github",
          "url": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/latest/index.html",
          "buildOrder": ${GITHUB_RUN_ID},
          "buildName": "GitHub Actions Run #${GITHUB_RUN_NUMBER}/${GITHUB_RUN_ATTEMPT}",
          "buildUrl": "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/attempts/${GITHUB_RUN_ATTEMPT}",
          "reportUrl": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}/index.html",
          "reportName": "Allure Report"
        }
        EOF

        cat <<EOF > ${REPORT_DIR}/environment.properties
        COMMIT_SHA=${COMMIT_SHA}
        EOF

        ARCHIVE="${UNIQUE_KEY}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
        ZSTD_NBTHREADS=0

        time tar -C ${REPORT_DIR} -cf ${ARCHIVE} --zstd .
        time aws s3 mv --only-show-errors ${ARCHIVE} "s3://${BUCKET}/${RAW_PREFIX}/${ARCHIVE}"
      env:
        UNIQUE_KEY: ${{ inputs.unique-key }}
        COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
        BUCKET: neon-github-public-dev

    - name: Cleanup
      if: always()
      shell: bash -euxo pipefail {0}
      run: |
        rm -rf ${REPORT_DIR}
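A sketch of how a test job hands its results to this action; the values mirror the run-python-test-set step further down in this diff, and only make sense inside a similar composite-action or workflow context:

```yaml
# Illustrative caller step inside a test job
- name: Upload test results
  if: ${{ !cancelled() }}
  uses: ./.github/actions/allure-report-store
  with:
    report-dir: /tmp/test_output/allure/results
    unique-key: ${{ inputs.build_type }}   # any string that distinguishes parallel jobs
```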
232 .github/actions/allure-report/action.yml vendored
@@ -1,232 +0,0 @@
name: 'Create Allure report'
description: 'Create and publish Allure report'

inputs:
  action:
    desctiption: 'generate or store'
    required: true
  build_type:
    description: '`build_type` from run-python-test-set action'
    required: true
  test_selection:
    description: '`test_selector` from run-python-test-set action'
    required: false
outputs:
  report-url:
    description: 'Allure report URL'
    value: ${{ steps.generate-report.outputs.report-url }}

runs:
  using: "composite"
  steps:
    - name: Validate input parameters
      shell: bash -euxo pipefail {0}
      run: |
        if [ "${{ inputs.action }}" != "store" ] && [ "${{ inputs.action }}" != "generate" ]; then
          echo 2>&1 "Unknown inputs.action type '${{ inputs.action }}'; allowed 'generate' or 'store' only"
          exit 1
        fi

        if [ -z "${{ inputs.test_selection }}" ] && [ "${{ inputs.action }}" == "store" ]; then
          echo 2>&1 "inputs.test_selection must be set for 'store' action"
          exit 2
        fi

    - name: Calculate variables
      id: calculate-vars
      shell: bash -euxo pipefail {0}
      run: |
        # TODO: for manually triggered workflows (via workflow_dispatch) we need to have a separate key

        pr_number=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
        if [ "${pr_number}" != "null" ]; then
          key=pr-${pr_number}
        elif [ "${GITHUB_REF_NAME}" = "main" ]; then
          # Shortcut for a special branch
          key=main
        elif [ "${GITHUB_REF_NAME}" = "release" ]; then
          # Shortcut for a special branch
          key=release
        else
          key=branch-$(printf "${GITHUB_REF_NAME}" | tr -c "[:alnum:]._-" "-")
        fi
        echo "KEY=${key}" >> $GITHUB_OUTPUT

        # Sanitize test selection to remove `/` and any other special characters
        # Use printf instead of echo to avoid having `\n` at the end of the string
        test_selection=$(printf "${{ inputs.test_selection }}" | tr -c "[:alnum:]._-" "-" )
        echo "TEST_SELECTION=${test_selection}" >> $GITHUB_OUTPUT

    - uses: actions/setup-java@v3
      if: ${{ inputs.action == 'generate' }}
      with:
        distribution: 'temurin'
        java-version: '17'

    - name: Install Allure
      if: ${{ inputs.action == 'generate' }}
      shell: bash -euxo pipefail {0}
      run: |
        if ! which allure; then
          ALLURE_ZIP=allure-${ALLURE_VERSION}.zip
          wget -q https://github.com/allure-framework/allure2/releases/download/${ALLURE_VERSION}/${ALLURE_ZIP}
          echo "${ALLURE_ZIP_MD5} ${ALLURE_ZIP}" | md5sum -c
          unzip -q ${ALLURE_ZIP}
          echo "$(pwd)/allure-${ALLURE_VERSION}/bin" >> $GITHUB_PATH
          rm -f ${ALLURE_ZIP}
        fi
      env:
        ALLURE_VERSION: 2.19.0
        ALLURE_ZIP_MD5: ced21401a1a8b9dfb68cee9e4c210464

    - name: Upload Allure results
      if: ${{ inputs.action == 'store' }}
      env:
        REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
        RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
        TEST_OUTPUT: /tmp/test_output
        BUCKET: neon-github-public-dev
        TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
      shell: bash -euxo pipefail {0}
      run: |
        # Add metadata
        cat <<EOF > $TEST_OUTPUT/allure/results/executor.json
        {
          "name": "GitHub Actions",
          "type": "github",
          "url": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/latest/index.html",
          "buildOrder": ${GITHUB_RUN_ID},
          "buildName": "GitHub Actions Run #${{ github.run_number }}/${GITHUB_RUN_ATTEMPT}",
          "buildUrl": "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/attempts/${GITHUB_RUN_ATTEMPT}",
          "reportUrl": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}/index.html",
          "reportName": "Allure Report"
        }
        EOF
        cat <<EOF > $TEST_OUTPUT/allure/results/environment.properties
        TEST_SELECTION=${{ inputs.test_selection }}
        BUILD_TYPE=${{ inputs.build_type }}
        EOF

        ARCHIVE="${GITHUB_RUN_ID}-${TEST_SELECTION}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
        ZSTD_NBTHREADS=0

        tar -C ${TEST_OUTPUT}/allure/results -cf ${ARCHIVE} --zstd .
        aws s3 mv --only-show-errors ${ARCHIVE} "s3://${BUCKET}/${RAW_PREFIX}/${ARCHIVE}"

    # Potentially we could have several running build for the same key (for example for the main branch), so we use improvised lock for this
    - name: Acquire Allure lock
      if: ${{ inputs.action == 'generate' }}
      shell: bash -euxo pipefail {0}
      env:
        LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
        BUCKET: neon-github-public-dev
        TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
      run: |
        LOCK_TIMEOUT=300 # seconds

        for _ in $(seq 1 5); do
          for i in $(seq 1 ${LOCK_TIMEOUT}); do
            LOCK_ADDED=$(aws s3api head-object --bucket neon-github-public-dev --key ${LOCK_FILE} | jq --raw-output '.LastModified' || true)
            # `date --date="..."` is supported only by gnu date (i.e. it doesn't work on BSD/macOS)
            if [ -z "${LOCK_ADDED}" ] || [ "$(( $(date +%s) - $(date --date="${LOCK_ADDED}" +%s) ))" -gt "${LOCK_TIMEOUT}" ]; then
              break
            fi
            sleep 1
          done
          echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" > lock.txt
          aws s3 mv --only-show-errors lock.txt "s3://${BUCKET}/${LOCK_FILE}"

          # A double-check that exactly WE have acquired the lock
          aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
          if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
            break
          fi
        done

    - name: Generate and publish final Allure report
      if: ${{ inputs.action == 'generate' }}
      id: generate-report
      env:
        REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
        RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
        TEST_OUTPUT: /tmp/test_output
        BUCKET: neon-github-public-dev
      shell: bash -euxo pipefail {0}
      run: |
        # Get previously uploaded data for this run
        ZSTD_NBTHREADS=0

        s3_filepaths=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${RAW_PREFIX}/${GITHUB_RUN_ID}- | jq --raw-output '.Contents[].Key')
        if [ -z "$s3_filepaths" ]; then
          # There's no previously uploaded data for this run
          exit 0
        fi
        for s3_filepath in ${s3_filepaths}; do
          aws s3 cp --only-show-errors "s3://${BUCKET}/${s3_filepath}" "${TEST_OUTPUT}/allure/"

          archive=${TEST_OUTPUT}/allure/$(basename $s3_filepath)
          mkdir -p ${archive%.tar.zst}
          tar -xf ${archive} -C ${archive%.tar.zst}
          rm -f ${archive}
        done

        # Get history trend
        aws s3 cp --recursive --only-show-errors "s3://${BUCKET}/${REPORT_PREFIX}/latest/history" "${TEST_OUTPUT}/allure/latest/history" || true

        # Generate report
        allure generate --clean --output $TEST_OUTPUT/allure/report $TEST_OUTPUT/allure/*

        # Replace a logo link with a redirect to the latest version of the report
        sed -i 's|<a href="." class=|<a href="https://'${BUCKET}'.s3.amazonaws.com/'${REPORT_PREFIX}'/latest/index.html" class=|g' $TEST_OUTPUT/allure/report/app.js

        # Upload a history and the final report (in this particular order to not to have duplicated history in 2 places)
        aws s3 mv --recursive --only-show-errors "${TEST_OUTPUT}/allure/report/history" "s3://${BUCKET}/${REPORT_PREFIX}/latest/history"
        aws s3 mv --recursive --only-show-errors "${TEST_OUTPUT}/allure/report" "s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}"

        REPORT_URL=https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}/index.html

        # Generate redirect
        cat <<EOF > ./index.html
        <!DOCTYPE html>

        <meta charset="utf-8">
        <title>Redirecting to ${REPORT_URL}</title>
        <meta http-equiv="refresh" content="0; URL=${REPORT_URL}">
        EOF
        aws s3 cp --only-show-errors ./index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"

        echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}
        echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT

    - name: Release Allure lock
      if: ${{ inputs.action == 'generate' && always() }}
      shell: bash -euxo pipefail {0}
      env:
        LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
        BUCKET: neon-github-public-dev
        TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
      run: |
        aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0

        if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
          aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
        fi

    - uses: actions/github-script@v6
      if: ${{ inputs.action == 'generate' && always() }}
      env:
        REPORT_URL: ${{ steps.generate-report.outputs.report-url }}
        BUILD_TYPE: ${{ inputs.build_type }}
        SHA: ${{ github.event.pull_request.head.sha || github.sha }}
      with:
        script: |
          const { REPORT_URL, BUILD_TYPE, SHA } = process.env

          await github.rest.repos.createCommitStatus({
            owner: context.repo.owner,
            repo: context.repo.repo,
            sha: `${SHA}`,
            state: 'success',
            target_url: `${REPORT_URL}`,
            context: `Allure report / ${BUILD_TYPE}`,
          })
2 .github/actions/download/action.yml vendored
@@ -37,7 +37,7 @@ runs:
 echo 'SKIPPED=true' >> $GITHUB_OUTPUT
 exit 0
 else
-echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
+echo >&2 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
 exit 1
 fi
 fi
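This hunk (and the similar ones below) fixes a recurring shell mistake: `echo 2>&1 "..."` only redirects echo's own (empty) stderr into stdout, so the message still goes to stdout instead of stderr. A standalone bash illustration of the difference, not taken from the action itself:

```bash
# Wrong: the message travels down stdout, so a pipe or stdout redirect captures it.
echo 2>&1 "error message" | grep -c .   # prints 1 -- the message went into the pipe

# Right: `>&2` points stdout at stderr, so the message bypasses the pipe and lands in the error log.
echo >&2 "error message" | grep -c .    # prints 0 -- the message went to stderr instead
```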
@@ -58,7 +58,7 @@ runs:
 done
 
 if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
-echo 2>&1 "Failed to create branch after 10 attempts, the latest response was: ${branch}"
+echo >&2 "Failed to create branch after 10 attempts, the latest response was: ${branch}"
 exit 1
 fi
 
@@ -122,7 +122,7 @@ runs:
 done
 
 if [ -z "${password}" ] || [ "${password}" == "null" ]; then
-echo 2>&1 "Failed to reset password after 10 attempts, the latest response was: ${reset_password}"
+echo >&2 "Failed to reset password after 10 attempts, the latest response was: ${reset_password}"
 exit 1
 fi
 
@@ -48,7 +48,7 @@ runs:
 done
 
 if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
-echo 2>&1 "Failed to delete branch after 10 attempts, the latest response was: ${deleted_branch}"
+echo >&2 "Failed to delete branch after 10 attempts, the latest response was: ${deleted_branch}"
 exit 1
 fi
 env:
16 .github/actions/neon-project-create/action.yml vendored
@@ -14,6 +14,12 @@ inputs:
 api_host:
 desctiption: 'Neon API host'
 default: console.stage.neon.tech
+provisioner:
+desctiption: 'k8s-pod or k8s-neonvm'
+default: 'k8s-pod'
+compute_units:
+desctiption: '[Min, Max] compute units; Min and Max are used for k8s-neonvm with autoscaling, for k8s-pod values Min and Max should be equal'
+default: '[1, 1]'
 
 outputs:
 dsn:
@@ -31,6 +37,10 @@ runs:
 # A shell without `set -x` to not to expose password/dsn in logs
 shell: bash -euo pipefail {0}
 run: |
+if [ "${PROVISIONER}" == "k8s-pod" ] && [ "${MIN_CU}" != "${MAX_CU}" ]; then
+echo >&2 "For k8s-pod provisioner MIN_CU should be equal to MAX_CU"
+fi
+
 project=$(curl \
 "https://${API_HOST}/api/v2/projects" \
 --fail \
@@ -42,6 +52,9 @@
 \"name\": \"Created by actions/neon-project-create; GITHUB_RUN_ID=${GITHUB_RUN_ID}\",
 \"pg_version\": ${POSTGRES_VERSION},
 \"region_id\": \"${REGION_ID}\",
+\"provisioner\": \"${PROVISIONER}\",
+\"autoscaling_limit_min_cu\": ${MIN_CU},
+\"autoscaling_limit_max_cu\": ${MAX_CU},
 \"settings\": { }
 }
 }")
@@ -62,3 +75,6 @@
 API_KEY: ${{ inputs.api_key }}
 REGION_ID: ${{ inputs.region_id }}
 POSTGRES_VERSION: ${{ inputs.postgres_version }}
+PROVISIONER: ${{ inputs.provisioner }}
+MIN_CU: ${{ fromJSON(inputs.compute_units)[0] }}
+MAX_CU: ${{ fromJSON(inputs.compute_units)[1] }}
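A hedged sketch of a workflow step exercising the new inputs; the secret name and region value are illustrative assumptions, while the input names come from the hunks above:

```yaml
# Illustrative caller step (values are placeholders)
- name: Create Neon Project
  id: create-neon-project
  uses: ./.github/actions/neon-project-create
  with:
    api_key: ${{ secrets.NEON_STAGING_API_KEY }}   # assumed secret name
    region_id: aws-us-east-2                       # illustrative region
    postgres_version: 15
    provisioner: k8s-neonvm
    compute_units: '[1, 4]'   # [Min, Max]; must be equal when provisioner is k8s-pod
```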
56 .github/actions/run-python-test-set/action.yml vendored
@@ -36,14 +36,14 @@ inputs:
 description: 'Region name for real s3 tests'
 required: false
 default: ''
-real_s3_access_key_id:
-description: 'Access key id'
+rerun_flaky:
+description: 'Whether to rerun flaky tests'
 required: false
-default: ''
-real_s3_secret_access_key:
-description: 'Secret access key'
+default: 'false'
+pg_version:
+description: 'Postgres version to use for tests'
 required: false
-default: ''
+default: 'v14'
 
 runs:
 using: "composite"
@@ -63,12 +63,12 @@ runs:
 path: /tmp/neon-previous
 prefix: latest
 
-- name: Download compatibility snapshot for Postgres 14
+- name: Download compatibility snapshot
 if: inputs.build_type != 'remote'
 uses: ./.github/actions/download
 with:
-name: compatibility-snapshot-${{ inputs.build_type }}-pg14
-path: /tmp/compatibility_snapshot_pg14
+name: compatibility-snapshot-${{ inputs.build_type }}-pg${{ inputs.pg_version }}
+path: /tmp/compatibility_snapshot_pg${{ inputs.pg_version }}
 prefix: latest
 
 - name: Checkout
@@ -96,18 +96,18 @@ runs:
 COMPATIBILITY_POSTGRES_DISTRIB_DIR: /tmp/neon-previous/pg_install
 TEST_OUTPUT: /tmp/test_output
 BUILD_TYPE: ${{ inputs.build_type }}
-AWS_ACCESS_KEY_ID: ${{ inputs.real_s3_access_key_id }}
-AWS_SECRET_ACCESS_KEY: ${{ inputs.real_s3_secret_access_key }}
-COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg14
+COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg${{ inputs.pg_version }}
 ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'backward compatibility breakage')
 ALLOW_FORWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'forward compatibility breakage')
+RERUN_FLAKY: ${{ inputs.rerun_flaky }}
+PG_VERSION: ${{ inputs.pg_version }}
 shell: bash -euxo pipefail {0}
 run: |
 # PLATFORM will be embedded in the perf test report
 # and it is needed to distinguish different environments
 export PLATFORM=${PLATFORM:-github-actions-selfhosted}
 export POSTGRES_DISTRIB_DIR=${POSTGRES_DISTRIB_DIR:-/tmp/neon/pg_install}
-export DEFAULT_PG_VERSION=${DEFAULT_PG_VERSION:-14}
+export DEFAULT_PG_VERSION=${PG_VERSION#v}
 
 if [ "${BUILD_TYPE}" = "remote" ]; then
 export REMOTE_ENV=1
@@ -123,8 +123,8 @@ runs:
 exit 1
 fi
 if [[ "${{ inputs.run_in_parallel }}" == "true" ]]; then
-# -n4 uses four processes to run tests via pytest-xdist
-EXTRA_PARAMS="-n4 $EXTRA_PARAMS"
+# -n16 uses sixteen processes to run tests via pytest-xdist
+EXTRA_PARAMS="-n16 $EXTRA_PARAMS"
 
 # --dist=loadgroup points tests marked with @pytest.mark.xdist_group
 # to the same worker to make @pytest.mark.order work with xdist
@@ -143,6 +143,13 @@ runs:
 EXTRA_PARAMS="--out-dir $PERF_REPORT_DIR $EXTRA_PARAMS"
 fi
 
+if [ "${RERUN_FLAKY}" == "true" ]; then
+mkdir -p $TEST_OUTPUT
+poetry run ./scripts/flaky_tests.py "${TEST_RESULT_CONNSTR}" --days 10 --output "$TEST_OUTPUT/flaky.json"
+
+EXTRA_PARAMS="--flaky-tests-json $TEST_OUTPUT/flaky.json $EXTRA_PARAMS"
+fi
+
 if [[ "${{ inputs.build_type }}" == "debug" ]]; then
 cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run)
 elif [[ "${{ inputs.build_type }}" == "release" ]]; then
@@ -180,19 +187,18 @@ runs:
 scripts/generate_and_push_perf_report.sh
 fi
 
-- name: Upload compatibility snapshot for Postgres 14
+- name: Upload compatibility snapshot
 if: github.ref_name == 'release'
 uses: ./.github/actions/upload
 with:
-name: compatibility-snapshot-${{ inputs.build_type }}-pg14-${{ github.run_id }}
-# The path includes a test name (test_create_snapshot) and directory that the test creates (compatibility_snapshot_pg14), keep the path in sync with the test
-path: /tmp/test_output/test_create_snapshot/compatibility_snapshot_pg14/
+name: compatibility-snapshot-${{ inputs.build_type }}-pg${{ inputs.pg_version }}-${{ github.run_id }}
+# Directory is created by test_compatibility.py::test_create_snapshot, keep the path in sync with the test
+path: /tmp/test_output/compatibility_snapshot_pg${{ inputs.pg_version }}/
 prefix: latest
 
-- name: Create Allure report
-if: success() || failure()
-uses: ./.github/actions/allure-report
+- name: Upload test results
+if: ${{ !cancelled() }}
+uses: ./.github/actions/allure-report-store
 with:
-action: store
-build_type: ${{ inputs.build_type }}
-test_selection: ${{ inputs.test_selection }}
+report-dir: /tmp/test_output/allure/results
+unique-key: ${{ inputs.build_type }}
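Roughly, the flaky-rerun and parallelism changes combine into a pytest invocation like the sketch below. The final pytest command line is not part of this excerpt, so the test target and the exact assembly are assumptions; `flaky_tests.py`, `--flaky-tests-json`, `-n16`, and `--dist=loadgroup` come from the hunks above:

```bash
# Build the list of recently flaky tests from the test-results database.
poetry run ./scripts/flaky_tests.py "${TEST_RESULT_CONNSTR}" --days 10 --output /tmp/test_output/flaky.json

# Run the suite with 16 xdist workers, keeping xdist_group-marked tests on one worker,
# and rerunning anything listed in flaky.json.
EXTRA_PARAMS="-n16 --dist=loadgroup --flaky-tests-json /tmp/test_output/flaky.json"
poetry run pytest ${EXTRA_PARAMS} test_runner/regress   # illustrative test target
```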
6 .github/actions/upload/action.yml vendored
@@ -23,7 +23,7 @@ runs:
 mkdir -p $(dirname $ARCHIVE)
 
 if [ -f ${ARCHIVE} ]; then
-echo 2>&1 "File ${ARCHIVE} already exist. Something went wrong before"
+echo >&2 "File ${ARCHIVE} already exist. Something went wrong before"
 exit 1
 fi
 
@@ -33,10 +33,10 @@ runs:
 elif [ -f ${SOURCE} ]; then
 time tar -cf ${ARCHIVE} --zstd ${SOURCE}
 elif ! ls ${SOURCE} > /dev/null 2>&1; then
-echo 2>&1 "${SOURCE} does not exist"
+echo >&2 "${SOURCE} does not exist"
 exit 2
 else
-echo 2>&1 "${SOURCE} is neither a directory nor a file, do not know how to handle it"
+echo >&2 "${SOURCE} is neither a directory nor a file, do not know how to handle it"
 exit 3
 fi
 
5 .github/ansible/.gitignore vendored
@@ -1,5 +0,0 @@
neon_install.tar.gz
.neon_current_version

collections/*
!collections/.keep
12 .github/ansible/ansible.cfg vendored
@@ -1,12 +0,0 @@
[defaults]

localhost_warning = False
host_key_checking = False
timeout = 30

[ssh_connection]
ssh_args = -F ./ansible.ssh.cfg
# teleport doesn't support sftp yet https://github.com/gravitational/teleport/issues/7127
# and scp neither worked for me
transfer_method = piped
pipelining = True
15 .github/ansible/ansible.ssh.cfg vendored
@@ -1,15 +0,0 @@
# Remove this once https://github.com/gravitational/teleport/issues/10918 is fixed
# (use pre 8.5 option name to cope with old ssh in CI)
PubkeyAcceptedKeyTypes +ssh-rsa-cert-v01@openssh.com

Host tele.zenith.tech
  User admin
  Port 3023
  StrictHostKeyChecking no
  UserKnownHostsFile /dev/null

Host * !tele.zenith.tech
  User admin
  StrictHostKeyChecking no
  UserKnownHostsFile /dev/null
  ProxyJump tele.zenith.tech
193
.github/ansible/deploy.yaml
vendored
193
.github/ansible/deploy.yaml
vendored
@@ -1,193 +0,0 @@
|
|||||||
- name: Upload Neon binaries
|
|
||||||
hosts: storage
|
|
||||||
gather_facts: False
|
|
||||||
remote_user: "{{ remote_user }}"
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
|
|
||||||
- name: get latest version of Neon binaries
|
|
||||||
register: current_version_file
|
|
||||||
set_fact:
|
|
||||||
current_version: "{{ lookup('file', '.neon_current_version') | trim }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
- safekeeper
|
|
||||||
|
|
||||||
- name: inform about versions
|
|
||||||
debug:
|
|
||||||
msg: "Version to deploy - {{ current_version }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
- safekeeper
|
|
||||||
|
|
||||||
- name: upload and extract Neon binaries to /usr/local
|
|
||||||
ansible.builtin.unarchive:
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
src: neon_install.tar.gz
|
|
||||||
dest: /usr/local
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
- safekeeper
|
|
||||||
- binaries
|
|
||||||
- putbinaries
|
|
||||||
|
|
||||||
- name: Deploy pageserver
|
|
||||||
hosts: pageservers
|
|
||||||
gather_facts: False
|
|
||||||
remote_user: "{{ remote_user }}"
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
|
|
||||||
- name: upload init script
|
|
||||||
when: console_mgmt_base_url is defined
|
|
||||||
ansible.builtin.template:
|
|
||||||
src: scripts/init_pageserver.sh
|
|
||||||
dest: /tmp/init_pageserver.sh
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
mode: '0755'
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: init pageserver
|
|
||||||
shell:
|
|
||||||
cmd: /tmp/init_pageserver.sh
|
|
||||||
args:
|
|
||||||
creates: "/storage/pageserver/data/tenants"
|
|
||||||
environment:
|
|
||||||
NEON_REPO_DIR: "/storage/pageserver/data"
|
|
||||||
LD_LIBRARY_PATH: "/usr/local/v14/lib"
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: read the existing remote pageserver config
|
|
||||||
ansible.builtin.slurp:
|
|
||||||
src: /storage/pageserver/data/pageserver.toml
|
|
||||||
register: _remote_ps_config
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: parse the existing pageserver configuration
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
_existing_ps_config: "{{ _remote_ps_config['content'] | b64decode | sivel.toiletwater.from_toml }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: construct the final pageserver configuration dict
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
pageserver_config: "{{ pageserver_config_stub | combine({'id': _existing_ps_config.id }) }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: template the pageserver config
|
|
||||||
template:
|
|
||||||
src: templates/pageserver.toml.j2
|
|
||||||
dest: /storage/pageserver/data/pageserver.toml
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: upload systemd service definition
|
|
||||||
ansible.builtin.template:
|
|
||||||
src: systemd/pageserver.service
|
|
||||||
dest: /etc/systemd/system/pageserver.service
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
mode: '0644'
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: start systemd service
|
|
||||||
ansible.builtin.systemd:
|
|
||||||
daemon_reload: yes
|
|
||||||
name: pageserver
|
|
||||||
enabled: yes
|
|
||||||
state: restarted
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: post version to console
|
|
||||||
when: console_mgmt_base_url is defined
|
|
||||||
shell:
|
|
||||||
cmd: |
|
|
||||||
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/$INSTANCE_ID | jq '.version = {{ current_version }}' > /tmp/new_version
|
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -X POST -d@/tmp/new_version {{ console_mgmt_base_url }}/management/api/v2/pageservers
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: Deploy safekeeper
  hosts: safekeepers
  gather_facts: False
  remote_user: "{{ remote_user }}"

  tasks:

    - name: upload init script
      when: console_mgmt_base_url is defined
      ansible.builtin.template:
        src: scripts/init_safekeeper.sh
        dest: /tmp/init_safekeeper.sh
        owner: root
        group: root
        mode: '0755'
      become: true
      tags:
        - safekeeper

    - name: init safekeeper
      shell:
        cmd: /tmp/init_safekeeper.sh
      args:
        creates: "/storage/safekeeper/data/safekeeper.id"
      environment:
        NEON_REPO_DIR: "/storage/safekeeper/data"
        LD_LIBRARY_PATH: "/usr/local/v14/lib"
      become: true
      tags:
        - safekeeper

    # In the future safekeepers should discover pageservers by themselves;
    # for now, use the first pageserver that was discovered.
    - name: set first pageserver var for safekeepers
      set_fact:
        first_pageserver: "{{ hostvars[groups['pageservers'][0]]['inventory_hostname'] }}"
      tags:
        - safekeeper

    - name: upload systemd service definition
      ansible.builtin.template:
        src: systemd/safekeeper.service
        dest: /etc/systemd/system/safekeeper.service
        owner: root
        group: root
        mode: '0644'
      become: true
      tags:
        - safekeeper

    - name: start systemd service
      ansible.builtin.systemd:
        daemon_reload: yes
        name: safekeeper
        enabled: yes
        state: restarted
      become: true
      tags:
        - safekeeper

    - name: post version to console
      when: console_mgmt_base_url is defined
      shell:
        cmd: |
          INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
          curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/$INSTANCE_ID | jq '.version = {{ current_version }}' > /tmp/new_version
          curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -X POST -d@/tmp/new_version {{ console_mgmt_base_url }}/management/api/v2/safekeepers
      tags:
        - safekeeper
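For reference, a run of the deploy playbook above limited to the pageserver tasks could look like the following. This is only an illustrative sketch: the playbook filename (deploy.yaml) is an assumption not shown in this diff, while the inventory file, tag name, and variables (CONSOLE_API_TOKEN, current_version) come from the files in this change.

# Hypothetical invocation, limited to the pageserver tag (filenames are assumptions).
ansible-playbook -i staging.us-east-2.hosts.yaml deploy.yaml \
  --tags pageserver \
  -e CONSOLE_API_TOKEN="${CONSOLE_API_TOKEN}" \
  -e current_version="$(cat .neon_current_version)"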
42  .github/ansible/get_binaries.sh  vendored
@@ -1,42 +0,0 @@
#!/bin/bash

set -e

if [ -n "${DOCKER_TAG}" ]; then
    # Version is DOCKER_TAG but without prefix
    VERSION=$(echo $DOCKER_TAG | sed 's/^.*-//g')
else
    echo "Please set DOCKER_TAG environment variable"
    exit 1
fi

# do initial cleanup
rm -rf neon_install postgres_install.tar.gz neon_install.tar.gz .neon_current_version
mkdir neon_install

# retrieve binaries from docker image
echo "getting binaries from docker image"
docker pull --quiet neondatabase/neon:${DOCKER_TAG}
ID=$(docker create neondatabase/neon:${DOCKER_TAG})
docker cp ${ID}:/data/postgres_install.tar.gz .
tar -xzf postgres_install.tar.gz -C neon_install
mkdir neon_install/bin/
docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
docker cp ${ID}:/usr/local/bin/pageserver_binutils neon_install/bin/
docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
docker cp ${ID}:/usr/local/bin/storage_broker neon_install/bin/
docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
docker cp ${ID}:/usr/local/v14/bin/ neon_install/v14/bin/
docker cp ${ID}:/usr/local/v15/bin/ neon_install/v15/bin/
docker cp ${ID}:/usr/local/v14/lib/ neon_install/v14/lib/
docker cp ${ID}:/usr/local/v15/lib/ neon_install/v15/lib/
docker rm -vf ${ID}

# store version to file (for ansible playbooks) and create binaries tarball
echo ${VERSION} > neon_install/.neon_current_version
echo ${VERSION} > .neon_current_version
tar -czf neon_install.tar.gz -C neon_install .

# do final cleanup
rm -rf neon_install postgres_install.tar.gz
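A minimal usage sketch for the script above, assuming Docker access to the neondatabase/neon registry; the tag value is illustrative, any tag with a trailing "-<version>" style suffix is handled the same way by the sed expression.

# Example only: the tag is an assumed placeholder.
DOCKER_TAG=release-3000 ./get_binaries.sh
cat .neon_current_version             # version extracted from the tag
tar -tzf neon_install.tar.gz | head   # bin/, v14/, v15/ trees ready for upload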
38  .github/ansible/prod.ap-southeast-1.hosts.yaml  vendored
@@ -1,38 +0,0 @@
storage:
  vars:
    bucket_name: neon-prod-storage-ap-southeast-1
    bucket_region: ap-southeast-1
    console_mgmt_base_url: http://console-release.local
    broker_endpoint: http://storage-broker-lb.epsilon.ap-southeast-1.internal.aws.neon.tech:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: ap-southeast-1
    ansible_aws_ssm_bucket_name: neon-prod-storage-ap-southeast-1
    console_region_id: aws-ap-southeast-1
    sentry_environment: production

  children:
    pageservers:
      hosts:
        pageserver-0.ap-southeast-1.aws.neon.tech:
          ansible_host: i-064de8ea28bdb495b
        pageserver-1.ap-southeast-1.aws.neon.tech:
          ansible_host: i-0b180defcaeeb6b93

    safekeepers:
      hosts:
        safekeeper-0.ap-southeast-1.aws.neon.tech:
          ansible_host: i-0d6f1dc5161eef894
        safekeeper-1.ap-southeast-1.aws.neon.tech:
          ansible_host: i-0e338adda8eb2d19f
        safekeeper-2.ap-southeast-1.aws.neon.tech:
          ansible_host: i-04fb63634e4679eb9
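Inventories like the one above can be sanity-checked before a deploy; the commands below are generic ansible tooling, and actually reaching the hosts additionally assumes the aws_ssm connection variables from ssm_config are applied to them.

ansible-inventory -i prod.ap-southeast-1.hosts.yaml --graph storage
ansible -i prod.ap-southeast-1.hosts.yaml pageservers -m ping --one-line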
38  .github/ansible/prod.eu-central-1.hosts.yaml  vendored
@@ -1,38 +0,0 @@
storage:
  vars:
    bucket_name: neon-prod-storage-eu-central-1
    bucket_region: eu-central-1
    console_mgmt_base_url: http://console-release.local
    broker_endpoint: http://storage-broker-lb.gamma.eu-central-1.internal.aws.neon.tech:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: eu-central-1
    ansible_aws_ssm_bucket_name: neon-prod-storage-eu-central-1
    console_region_id: aws-eu-central-1
    sentry_environment: production

  children:
    pageservers:
      hosts:
        pageserver-0.eu-central-1.aws.neon.tech:
          ansible_host: i-0cd8d316ecbb715be
        pageserver-1.eu-central-1.aws.neon.tech:
          ansible_host: i-090044ed3d383fef0

    safekeepers:
      hosts:
        safekeeper-0.eu-central-1.aws.neon.tech:
          ansible_host: i-0b238612d2318a050
        safekeeper-1.eu-central-1.aws.neon.tech:
          ansible_host: i-07b9c45e5c2637cd4
        safekeeper-2.eu-central-1.aws.neon.tech:
          ansible_host: i-020257302c3c93d88
39  .github/ansible/prod.us-east-2.hosts.yaml  vendored
@@ -1,39 +0,0 @@
storage:
  vars:
    bucket_name: neon-prod-storage-us-east-2
    bucket_region: us-east-2
    console_mgmt_base_url: http://console-release.local
    broker_endpoint: http://storage-broker-lb.delta.us-east-2.internal.aws.neon.tech:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: us-east-2
    ansible_aws_ssm_bucket_name: neon-prod-storage-us-east-2
    console_region_id: aws-us-east-2
    sentry_environment: production

  children:
    pageservers:
      hosts:
        pageserver-0.us-east-2.aws.neon.tech:
          ansible_host: i-062227ba7f119eb8c
        pageserver-1.us-east-2.aws.neon.tech:
          ansible_host: i-0b3ec0afab5968938

    safekeepers:
      hosts:
        safekeeper-0.us-east-2.aws.neon.tech:
          ansible_host: i-0e94224750c57d346
        safekeeper-1.us-east-2.aws.neon.tech:
          ansible_host: i-06d113fb73bfddeb0
        safekeeper-2.us-east-2.aws.neon.tech:
          ansible_host: i-09f66c8e04afff2e8
41  .github/ansible/prod.us-west-2.hosts.yaml  vendored
@@ -1,41 +0,0 @@
storage:
  vars:
    bucket_name: neon-prod-storage-us-west-2
    bucket_region: us-west-2
    console_mgmt_base_url: http://console-release.local
    broker_endpoint: http://storage-broker-lb.eta.us-west-2.internal.aws.neon.tech:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: us-west-2
    ansible_aws_ssm_bucket_name: neon-prod-storage-us-west-2
    console_region_id: aws-us-west-2-new
    sentry_environment: production

  children:
    pageservers:
      hosts:
        pageserver-0.us-west-2.aws.neon.tech:
          ansible_host: i-0d9f6dfae0e1c780d
        pageserver-1.us-west-2.aws.neon.tech:
          ansible_host: i-0c834be1dddba8b3f
        pageserver-2.us-west-2.aws.neon.tech:
          ansible_host: i-051642d372c0a4f32

    safekeepers:
      hosts:
        safekeeper-0.us-west-2.aws.neon.tech:
          ansible_host: i-00719d8a74986fda6
        safekeeper-1.us-west-2.aws.neon.tech:
          ansible_host: i-074682f9d3c712e7c
        safekeeper-2.us-west-2.aws.neon.tech:
          ansible_host: i-042b7efb1729d7966
40  .github/ansible/production.hosts.yaml  vendored
@@ -1,40 +0,0 @@
---
storage:
  vars:
    console_mgmt_base_url: http://console-release.local
    bucket_name: zenith-storage-oregon
    bucket_region: us-west-2
    broker_endpoint: http://storage-broker.prod.local:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "{{ inventory_hostname }}"
    safekeeper_s3_prefix: prod-1/wal
    hostname_suffix: ".local"
    remote_user: admin
    sentry_environment: production

  children:
    pageservers:
      hosts:
        zenith-1-ps-2:
          console_region_id: aws-us-west-2
        zenith-1-ps-3:
          console_region_id: aws-us-west-2
        zenith-1-ps-4:
          console_region_id: aws-us-west-2
        zenith-1-ps-5:
          console_region_id: aws-us-west-2

    safekeepers:
      hosts:
        zenith-1-sk-1:
          console_region_id: aws-us-west-2
        zenith-1-sk-2:
          console_region_id: aws-us-west-2
        zenith-1-sk-4:
          console_region_id: aws-us-west-2
33  .github/ansible/scripts/init_pageserver.sh  vendored
@@ -1,33 +0,0 @@
#!/bin/sh

# fetch params from meta-data service
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
AZ_ID=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)

# store fqdn hostname in var
HOST=$(hostname -f)

cat <<EOF | tee /tmp/payload
{
    "version": 1,
    "host": "${HOST}",
    "port": 6400,
    "region_id": "{{ console_region_id }}",
    "instance_id": "${INSTANCE_ID}",
    "http_host": "${HOST}",
    "http_port": 9898,
    "active": false,
    "availability_zone_id": "${AZ_ID}"
}
EOF

# check if pageserver already registered or not
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/${INSTANCE_ID} -o /dev/null; then

    # not registered, so register it now
    ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers -d@/tmp/payload | jq -r '.id')

    # init pageserver
    sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data
fi
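Once the init script has run on a host, the registration it performs can be verified against the same console endpoint it talks to. The CONSOLE_API_TOKEN and CONSOLE_MGMT_BASE_URL shell variables below are stand-ins for the Jinja values the script is templated with, so this is a sketch rather than a copy-paste command.

INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
curl -sf -H "Authorization: Bearer ${CONSOLE_API_TOKEN}" \
  "${CONSOLE_MGMT_BASE_URL}/management/api/v2/pageservers/${INSTANCE_ID}" | jq '.id, .version'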
31  .github/ansible/scripts/init_safekeeper.sh  vendored
@@ -1,31 +0,0 @@
#!/bin/sh

# fetch params from meta-data service
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
AZ_ID=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)

# store fqdn hostname in var
HOST=$(hostname -f)

cat <<EOF | tee /tmp/payload
{
    "version": 1,
    "host": "${HOST}",
    "port": 6500,
    "http_port": 7676,
    "region_id": "{{ console_region_id }}",
    "instance_id": "${INSTANCE_ID}",
    "availability_zone_id": "${AZ_ID}",
    "active": false
}
EOF

# check if safekeeper already registered or not
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/${INSTANCE_ID} -o /dev/null; then

    # not registered, so register it now
    ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers -d@/tmp/payload | jq -r '.id')

    # init safekeeper
    sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
fi
2  .github/ansible/ssm_config  vendored
@@ -1,2 +0,0 @@
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
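With these connection variables applied to a host or group, ad-hoc ansible commands run over AWS Session Manager instead of SSH. A rough example against one of the inventory files in this change is shown below; it assumes the aws_ssm connection plugin (from the community.aws collection) and the corresponding SSM permissions are available.

ansible -i prod.us-east-2.hosts.yaml safekeepers \
  -m command -a 'systemctl is-active safekeeper' --one-line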
36  .github/ansible/staging.eu-west-1.hosts.yaml  vendored
@@ -1,36 +0,0 @@
storage:
  vars:
    bucket_name: neon-dev-storage-eu-west-1
    bucket_region: eu-west-1
    console_mgmt_base_url: http://console-staging.local
    broker_endpoint: http://storage-broker-lb.zeta.eu-west-1.internal.aws.neon.build:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-staging.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: eu-west-1
    ansible_aws_ssm_bucket_name: neon-dev-storage-eu-west-1
    console_region_id: aws-eu-west-1
    sentry_environment: staging

  children:
    pageservers:
      hosts:
        pageserver-0.eu-west-1.aws.neon.build:
          ansible_host: i-01d496c5041c7f34c

    safekeepers:
      hosts:
        safekeeper-0.eu-west-1.aws.neon.build:
          ansible_host: i-05226ef85722831bf
        safekeeper-1.eu-west-1.aws.neon.build:
          ansible_host: i-06969ee1bf2958bfc
        safekeeper-2.eu-west-1.aws.neon.build:
          ansible_host: i-087892e9625984a0b
42  .github/ansible/staging.us-east-2.hosts.yaml  vendored
@@ -1,42 +0,0 @@
storage:
  vars:
    bucket_name: neon-staging-storage-us-east-2
    bucket_region: us-east-2
    console_mgmt_base_url: http://console-staging.local
    broker_endpoint: http://storage-broker-lb.beta.us-east-2.internal.aws.neon.build:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-staging.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: us-east-2
    ansible_aws_ssm_bucket_name: neon-staging-storage-us-east-2
    console_region_id: aws-us-east-2
    sentry_environment: staging

  children:
    pageservers:
      hosts:
        pageserver-0.us-east-2.aws.neon.build:
          ansible_host: i-0c3e70929edb5d691
        pageserver-1.us-east-2.aws.neon.build:
          ansible_host: i-0565a8b4008aa3f40
        pageserver-2.us-east-2.aws.neon.build:
          ansible_host: i-01e31cdf7e970586a
        pageserver-3.us-east-2.aws.neon.build:
          ansible_host: i-0602a0291365ef7cc

    safekeepers:
      hosts:
        safekeeper-0.us-east-2.aws.neon.build:
          ansible_host: i-027662bd552bf5db0
        safekeeper-1.us-east-2.aws.neon.build:
          ansible_host: i-0171efc3604a7b907
        safekeeper-2.us-east-2.aws.neon.build:
          ansible_host: i-0de0b03a51676a6ce
18  .github/ansible/systemd/pageserver.service  vendored
@@ -1,18 +0,0 @@
[Unit]
Description=Neon pageserver
After=network.target auditd.service

[Service]
Type=simple
User=pageserver
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/v14/lib SENTRY_DSN={{ SENTRY_URL_PAGESERVER }} SENTRY_ENVIRONMENT={{ sentry_environment }}
ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -c "broker_endpoint='{{ broker_endpoint }}'" -D /storage/pageserver/data
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT
Restart=on-failure
TimeoutSec=10
LimitNOFILE=30000000

[Install]
WantedBy=multi-user.target
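After the playbook installs and restarts this unit, the usual checks on the host look roughly like the following. The status endpoint path is recalled from the pageserver HTTP API rather than taken from this diff, so treat it as an assumption; the port matches the listen_http_addr configured in ExecStart above.

sudo systemctl status pageserver --no-pager
sudo journalctl -u pageserver --since '10 min ago' --no-pager
curl -s http://127.0.0.1:9898/v1/status   # HTTP listener from ExecStart; path assumed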
18  .github/ansible/systemd/safekeeper.service  vendored
@@ -1,18 +0,0 @@
[Unit]
Description=Neon safekeeper
After=network.target auditd.service

[Service]
Type=simple
User=safekeeper
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/v14/lib SENTRY_DSN={{ SENTRY_URL_SAFEKEEPER }} SENTRY_ENVIRONMENT={{ sentry_environment }}
ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}{{ hostname_suffix }}:6500 --listen-http {{ inventory_hostname }}{{ hostname_suffix }}:7676 -D /storage/safekeeper/data --broker-endpoint={{ broker_endpoint }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ safekeeper_s3_prefix }}"}'
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT
Restart=on-failure
TimeoutSec=10
LimitNOFILE=30000000

[Install]
WantedBy=multi-user.target
1  .github/ansible/templates/pageserver.toml.j2  vendored
@@ -1 +0,0 @@
{{ pageserver_config | sivel.toiletwater.to_toml }}
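The template above just serializes the merged pageserver_config dict to TOML. One way to sanity-check the rendered file on a host after the "template the pageserver config" task has run is sketched below; it assumes Python 3.11+ for tomllib and is an illustrative check, not part of the playbook.

python3 -c 'import tomllib,sys; tomllib.load(open(sys.argv[1],"rb")); print("ok")' \
  /storage/pageserver/data/pageserver.toml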
@@ -1,61 +0,0 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-staging.local/management/api/v2"
  domain: "*.eu-west-1.aws.neon.build"
  sentryEnvironment: "staging"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
  metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: dev
  zenith_region: eu-west-1
  zenith_region_slug: eu-west-1

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: eu-west-1.aws.neon.build
  httpsPort: 443

#metrics:
#  enabled: true
#  serviceMonitor:
#    enabled: true
#    selector:
#      release: kube-prometheus-stack

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-proxy.fullname\" . }}"
      labels:
        helm.sh/chart: neon-proxy-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-proxy
        app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-proxy"
      endpoints:
        - port: http
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"
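Values files like the one above are consumed by a helm release of the proxy chart. The diff does not name the chart reference or namespace, so both are placeholders in this sketch; only the general shape of the command is intended.

# Placeholders: <chart-ref>, <values-file>.yaml and the namespace are assumptions.
helm upgrade --install neon-proxy-scram <chart-ref> \
  -f .github/helm-values/<values-file>.yaml \
  --namespace neon-proxy --create-namespace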
@@ -1,52 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: staging
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.zeta.eu-west-1.internal.aws.neon.build
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: false

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-storage-broker.fullname\" . }}"
      labels:
        helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-storage-broker
        app.kubernetes.io/instance: neon-storage-broker
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-storage-broker"
      endpoints:
        - port: broker
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"

settings:
  sentryEnvironment: "staging"
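After deploying the storage-broker chart with values like these, the internal NLB and the scrape object created from extraManifests can be inspected roughly as follows; the namespace is a placeholder, and the vmservicescrape resource type only exists where the VictoriaMetrics operator CRDs are installed.

kubectl -n <namespace> get svc -o wide        # LoadBalancer exposing port 50051
kubectl -n <namespace> get vmservicescrape    # object created from extraManifests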
@@ -1,67 +0,0 @@
# Helm chart values for neon-proxy-link.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "link"
  authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
  uri: "https://console.stage.neon.tech/psql_session/"
  sentryEnvironment: "staging"
  metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
  metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy-link pods
podLabels:
  zenith_service: proxy
  zenith_env: dev
  zenith_region: us-east-2
  zenith_region_slug: us-east-2

service:
  type: LoadBalancer
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal
    external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.beta.us-east-2.aws.neon.build

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.beta.us-east-2.aws.neon.build

#metrics:
#  enabled: true
#  serviceMonitor:
#    enabled: true
#    selector:
#      release: kube-prometheus-stack

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-proxy.fullname\" . }}"
      labels:
        helm.sh/chart: neon-proxy-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-proxy
        app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-proxy"
      endpoints:
        - port: http
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"
@@ -1,61 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://console-staging.local/management/api/v2"
|
|
||||||
domain: "*.cloud.stage.neon.tech"
|
|
||||||
sentryEnvironment: "staging"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "1min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram-legacy
|
|
||||||
zenith_env: dev
|
|
||||||
zenith_region: us-east-2
|
|
||||||
zenith_region_slug: us-east-2
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: neon-proxy-scram-legacy.beta.us-east-2.aws.neon.build
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://console-staging.local/management/api/v2"
|
|
||||||
domain: "*.us-east-2.aws.neon.build"
|
|
||||||
sentryEnvironment: "staging"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "1min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: dev
|
|
||||||
zenith_region: us-east-2
|
|
||||||
zenith_region_slug: us-east-2
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.build
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: staging
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.beta.us-east-2.internal.aws.neon.build
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "staging"
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://console-release.local/management/api/v2"
|
|
||||||
domain: "*.ap-southeast-1.aws.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "10min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: prod
|
|
||||||
zenith_region: ap-southeast-1
|
|
||||||
zenith_region_slug: ap-southeast-1
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: ap-southeast-1.aws.neon.tech
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: production
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.epsilon.ap-southeast-1.internal.aws.neon.tech
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://console-release.local/management/api/v2"
|
|
||||||
domain: "*.eu-central-1.aws.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "10min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: prod
|
|
||||||
zenith_region: eu-central-1
|
|
||||||
zenith_region_slug: eu-central-1
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: eu-central-1.aws.neon.tech
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: production
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.gamma.eu-central-1.internal.aws.neon.tech
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://console-release.local/management/api/v2"
|
|
||||||
domain: "*.us-east-2.aws.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "10min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: prod
|
|
||||||
zenith_region: us-east-2
|
|
||||||
zenith_region_slug: us-east-2
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.tech
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: production
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.delta.us-east-2.internal.aws.neon.tech
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://console-release.local/management/api/v2"
|
|
||||||
domain: "*.us-west-2.aws.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "10min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: prod
|
|
||||||
zenith_region: us-west-2
|
|
||||||
zenith_region_slug: us-west-2
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: us-west-2.aws.neon.tech
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: production
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.eta.us-west-2.internal.aws.neon.tech
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
@@ -1,56 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: production
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker.prod.local
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-storage-broker.fullname\" . }}"
      labels:
        helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-storage-broker
        app.kubernetes.io/instance: neon-storage-broker
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-storage-broker"
      endpoints:
        - port: broker
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"

settings:
  sentryEnvironment: "production"
54  .github/helm-values/production.proxy-scram.yaml  vendored
@@ -1,54 +0,0 @@
settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.cloud.neon.tech"
  sentryEnvironment: "production"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
  metricCollectionInterval: "10min"

podLabels:
  zenith_service: proxy-scram
  zenith_env: production
  zenith_region: us-west-2
  zenith_region_slug: oregon

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: '*.cloud.neon.tech'
  httpsPort: 443

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-proxy.fullname\" . }}"
      labels:
        helm.sh/chart: neon-proxy-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-proxy
        app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-proxy"
      endpoints:
        - port: http
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"
59  .github/helm-values/production.proxy.yaml  vendored
@@ -1,59 +0,0 @@
settings:
  authBackend: "link"
  authEndpoint: "https://console.neon.tech/authenticate_proxy_request/"
  uri: "https://console.neon.tech/psql_session/"
  sentryEnvironment: "production"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy
  zenith_env: production
  zenith_region: us-west-2
  zenith_region_slug: oregon

service:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal
    external-dns.alpha.kubernetes.io/hostname: proxy-release.local
  type: LoadBalancer

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: connect.neon.tech,pg.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-proxy.fullname\" . }}"
      labels:
        helm.sh/chart: neon-proxy-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-proxy
        app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-proxy"
      endpoints:
        - port: http
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"
@@ -1,10 +1,14 @@
-## Describe your changes
+## Problem
 
-## Issue ticket number and link
+## Summary of changes
 
 ## Checklist before requesting a review
 
 - [ ] I have performed a self-review of my code.
 - [ ] If it is a core feature, I have added thorough tests.
 - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard?
 - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section.
+
+## Checklist before merging
+
+- [ ] Do not forget to reformat commit message to not include the above checklist
222  .github/workflows/benchmarking.yml  (vendored)
@@ -16,12 +16,12 @@ on:
   workflow_dispatch: # adds ability to run this manually
     inputs:
       region_id:
-        description: 'Use a particular region. If not set the default region will be used'
+        description: 'Project region id. If not set, the default region will be used'
         required: false
        default: 'aws-us-east-2'
      save_perf_report:
        type: boolean
-        description: 'Publish perf report or not. If not set, the report is published only for the main branch'
+        description: 'Publish perf report. If not set, the report will be published only for the main branch'
        required: false
 
 defaults:
@@ -30,7 +30,7 @@ defaults:
 
 concurrency:
   # Allow only one workflow per any non-`main` branch.
-  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
+  group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
   cancel-in-progress: true
 
 jobs:
@@ -42,7 +42,7 @@ jobs:
       DEFAULT_PG_VERSION: 14
       TEST_OUTPUT: /tmp/test_output
       BUILD_TYPE: remote
-      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: "neon-staging"
 
    runs-on: [ self-hosted, us-east-2, x64 ]
@@ -92,11 +92,8 @@ jobs:
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
 
      - name: Create Allure report
-       if: success() || failure()
-       uses: ./.github/actions/allure-report
-       with:
-         action: generate
-         build_type: ${{ env.BUILD_TYPE }}
+       if: ${{ !cancelled() }}
+       uses: ./.github/actions/allure-report-generate
 
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
@@ -107,25 +104,66 @@ jobs:
    env:
      SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
 
+  generate-matrices:
+    # Create matrices for the benchmarking jobs, so we run benchmarks on rds only once a week (on Saturday)
+    #
+    # Available platforms:
+    # - neon-captest-new: Freshly created project (1 CU)
+    # - neon-captest-freetier: Use freetier-sized compute (0.25 CU)
+    # - neon-captest-reuse: Reusing existing project
+    # - rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
+    # - rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
+    runs-on: ubuntu-latest
+    outputs:
+      pgbench-compare-matrix: ${{ steps.pgbench-compare-matrix.outputs.matrix }}
+      olap-compare-matrix: ${{ steps.olap-compare-matrix.outputs.matrix }}
+
+    steps:
+      - name: Generate matrix for pgbench benchmark
+        id: pgbench-compare-matrix
+        run: |
+          matrix='{
+            "platform": [
+              "neon-captest-new",
+              "neon-captest-reuse",
+              "neonvm-captest-new"
+            ],
+            "db_size": [ "10gb" ],
+            "include": [{ "platform": "neon-captest-freetier", "db_size": "3gb" },
+                        { "platform": "neon-captest-new", "db_size": "50gb" },
+                        { "platform": "neonvm-captest-freetier", "db_size": "3gb" },
+                        { "platform": "neonvm-captest-new", "db_size": "50gb" }]
+          }'
+
+          if [ "$(date +%A)" = "Saturday" ]; then
+            matrix=$(echo $matrix | jq '.include += [{ "platform": "rds-postgres", "db_size": "10gb"},
+                                                     { "platform": "rds-aurora", "db_size": "50gb"}]')
+          fi
+
+          echo "matrix=$(echo $matrix | jq --compact-output '.')" >> $GITHUB_OUTPUT
+
+      - name: Generate matrix for OLAP benchmarks
+        id: olap-compare-matrix
+        run: |
+          matrix='{
+            "platform": [
+              "neon-captest-reuse"
+            ]
+          }'
+
+          if [ "$(date +%A)" = "Saturday" ]; then
+            matrix=$(echo $matrix | jq '.include += [{ "platform": "rds-postgres" },
+                                                     { "platform": "rds-aurora" }]')
+          fi
+
+          echo "matrix=$(echo $matrix | jq --compact-output '.')" >> $GITHUB_OUTPUT
+
  pgbench-compare:
+    needs: [ generate-matrices ]
+
    strategy:
      fail-fast: false
-      matrix:
-        # neon-captest-new: Run pgbench in a freshly created project
-        # neon-captest-reuse: Same, but reusing existing project
-        # neon-captest-prefetch: Same, with prefetching enabled (new project)
-        # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
-        # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
-        platform: [ neon-captest-reuse, neon-captest-prefetch, rds-postgres ]
-        db_size: [ 10gb ]
-        runner: [ us-east-2 ]
-        include:
-          - platform: neon-captest-prefetch
-            db_size: 50gb
-            runner: us-east-2
-          - platform: rds-aurora
-            db_size: 50gb
-            runner: us-east-2
+      matrix: ${{fromJson(needs.generate-matrices.outputs.pgbench-compare-matrix)}}
 
    env:
      TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
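Note on the matrix generation above: the two new steps only assemble a JSON document and, on Saturdays, append the RDS baselines with jq before writing the compact form to $GITHUB_OUTPUT. The same pipeline can be previewed locally with nothing but jq; the following is a rough sketch reusing the expressions from the workflow, not part of the diff itself:

    # Sketch: preview the pgbench matrix JSON that generate-matrices would emit.
    # Requires only jq; variable names mirror the workflow step.
    matrix='{ "platform": [ "neon-captest-new", "neon-captest-reuse" ], "db_size": [ "10gb" ], "include": [] }'

    # On Saturdays the workflow widens the matrix with the RDS baselines.
    if [ "$(date +%A)" = "Saturday" ]; then
      matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "db_size": "10gb" },
                                                 { "platform": "rds-aurora",  "db_size": "50gb" }]')
    fi

    # Print the compact form that the job would write to $GITHUB_OUTPUT.
    echo "$matrix" | jq --compact-output '.'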
@@ -134,10 +172,10 @@ jobs:
      DEFAULT_PG_VERSION: 14
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
-      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.platform }}
 
-    runs-on: [ self-hosted, "${{ matrix.runner }}", x64 ]
+    runs-on: [ self-hosted, us-east-2, x64 ]
    container:
      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
      options: --init
@@ -160,13 +198,15 @@ jobs:
          echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
 
      - name: Create Neon Project
-        if: contains(fromJson('["neon-captest-new", "neon-captest-prefetch"]'), matrix.platform)
+        if: contains(fromJson('["neon-captest-new", "neon-captest-freetier", "neonvm-captest-new", "neonvm-captest-freetier"]'), matrix.platform)
        id: create-neon-project
        uses: ./.github/actions/neon-project-create
        with:
          region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
          postgres_version: ${{ env.DEFAULT_PG_VERSION }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
+          compute_units: ${{ (matrix.platform == 'neon-captest-freetier' && '[0.25, 0.25]') || '[1, 1]' }}
+          provisioner: ${{ (contains(matrix.platform, 'neonvm-') && 'k8s-neonvm') || 'k8s-pod' }}
 
      - name: Set up Connection String
        id: set-up-connstr
@@ -175,7 +215,7 @@ jobs:
            neon-captest-reuse)
              CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
              ;;
-            neon-captest-new | neon-captest-prefetch)
+            neon-captest-new | neon-captest-freetier | neonvm-captest-new | neonvm-captest-freetier)
              CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
              ;;
            rds-aurora)
@@ -185,7 +225,7 @@ jobs:
              CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
              ;;
            *)
-              echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'neon-captest-new', 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
+              echo >&2 "Unknown PLATFORM=${PLATFORM}"
              exit 1
              ;;
          esac
@@ -194,17 +234,6 @@ jobs:
 
          psql ${CONNSTR} -c "SELECT version();"
 
-      - name: Set database options
-        if: matrix.platform == 'neon-captest-prefetch'
-        run: |
-          DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
-
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
-        env:
-          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
-
      - name: Benchmark init
        uses: ./.github/actions/run-python-test-set
        with:
@@ -252,11 +281,8 @@ jobs:
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
 
      - name: Create Allure report
-        if: success() || failure()
-        uses: ./.github/actions/allure-report
-        with:
-          action: generate
-          build_type: ${{ env.BUILD_TYPE }}
+        if: ${{ !cancelled() }}
+        uses: ./.github/actions/allure-report-generate
 
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
@@ -275,23 +301,19 @@ jobs:
    #
    # *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows
    # *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB
-    if: success() || failure()
-    needs: [ pgbench-compare ]
+    if: ${{ !cancelled() }}
+    needs: [ generate-matrices, pgbench-compare ]
 
    strategy:
      fail-fast: false
-      matrix:
-        # neon-captest-prefetch: We have pre-created projects with prefetch enabled
-        # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
-        # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
-        platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]
+      matrix: ${{ fromJson(needs.generate-matrices.outputs.olap-compare-matrix) }}
 
    env:
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: 14
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
-      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.platform }}
 
    runs-on: [ self-hosted, us-east-2, x64 ]
@@ -320,7 +342,7 @@ jobs:
        id: set-up-connstr
        run: |
          case "${PLATFORM}" in
-            neon-captest-prefetch)
+            neon-captest-reuse)
              CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_10M_CONNSTR }}
              ;;
            rds-aurora)
@@ -330,7 +352,7 @@ jobs:
              CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CLICKBENCH_10M_CONNSTR }}
              ;;
            *)
-              echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
+              echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'rds-aurora', or 'rds-postgres'"
              exit 1
              ;;
          esac
@@ -339,17 +361,6 @@ jobs:
 
          psql ${CONNSTR} -c "SELECT version();"
 
-      - name: Set database options
-        if: matrix.platform == 'neon-captest-prefetch'
-        run: |
-          DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
-
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
-        env:
-          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
-
      - name: ClickBench benchmark
        uses: ./.github/actions/run-python-test-set
        with:
@@ -364,11 +375,8 @@ jobs:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 
      - name: Create Allure report
-        if: success() || failure()
-        uses: ./.github/actions/allure-report
-        with:
-          action: generate
-          build_type: ${{ env.BUILD_TYPE }}
+        if: ${{ !cancelled() }}
+        uses: ./.github/actions/allure-report-generate
 
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
@@ -386,23 +394,19 @@ jobs:
    # We might change it after https://github.com/neondatabase/neon/issues/2900.
    #
    # *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
-    if: success() || failure()
-    needs: [ clickbench-compare ]
+    if: ${{ !cancelled() }}
+    needs: [ generate-matrices, clickbench-compare ]
 
    strategy:
      fail-fast: false
-      matrix:
-        # neon-captest-prefetch: We have pre-created projects with prefetch enabled
-        # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
-        # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
-        platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]
+      matrix: ${{ fromJson(needs.generate-matrices.outputs.olap-compare-matrix) }}
 
    env:
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: 14
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
-      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.platform }}
 
    runs-on: [ self-hosted, us-east-2, x64 ]
@@ -431,7 +435,7 @@ jobs:
        id: set-up-connstr
        run: |
          case "${PLATFORM}" in
-            neon-captest-prefetch)
+            neon-captest-reuse)
              CONNSTR=${{ secrets.BENCHMARK_CAPTEST_TPCH_S10_CONNSTR }}
              ;;
            rds-aurora)
@@ -441,7 +445,7 @@ jobs:
              CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_TPCH_S10_CONNSTR }}
              ;;
            *)
-              echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
+              echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'rds-aurora', or 'rds-postgres'"
              exit 1
              ;;
          esac
@@ -450,17 +454,6 @@ jobs:
 
          psql ${CONNSTR} -c "SELECT version();"
 
-      - name: Set database options
-        if: matrix.platform == 'neon-captest-prefetch'
-        run: |
-          DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
-
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
-        env:
-          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
-
      - name: Run TPC-H benchmark
        uses: ./.github/actions/run-python-test-set
        with:
@@ -475,11 +468,8 @@ jobs:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 
      - name: Create Allure report
-        if: success() || failure()
-        uses: ./.github/actions/allure-report
-        with:
-          action: generate
-          build_type: ${{ env.BUILD_TYPE }}
+        if: ${{ !cancelled() }}
+        uses: ./.github/actions/allure-report-generate
 
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
@@ -491,23 +481,19 @@ jobs:
      SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
 
  user-examples-compare:
-    if: success() || failure()
-    needs: [ tpch-compare ]
+    if: ${{ !cancelled() }}
+    needs: [ generate-matrices, tpch-compare ]
 
    strategy:
      fail-fast: false
-      matrix:
-        # neon-captest-prefetch: We have pre-created projects with prefetch enabled
-        # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
-        # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
-        platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]
+      matrix: ${{ fromJson(needs.generate-matrices.outputs.olap-compare-matrix) }}
 
    env:
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: 14
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
-      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.platform }}
 
    runs-on: [ self-hosted, us-east-2, x64 ]
@@ -536,7 +522,7 @@ jobs:
        id: set-up-connstr
        run: |
          case "${PLATFORM}" in
-            neon-captest-prefetch)
+            neon-captest-reuse)
              CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_CAPTEST_CONNSTR }}
              ;;
            rds-aurora)
@@ -546,7 +532,7 @@ jobs:
              CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_POSTGRES_CONNSTR }}
              ;;
            *)
-              echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
+              echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'rds-aurora', or 'rds-postgres'"
              exit 1
              ;;
          esac
@@ -555,17 +541,6 @@ jobs:
 
          psql ${CONNSTR} -c "SELECT version();"
 
-      - name: Set database options
-        if: matrix.platform == 'neon-captest-prefetch'
-        run: |
-          DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
-
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
-        env:
-          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
-
      - name: Run user examples
        uses: ./.github/actions/run-python-test-set
        with:
@@ -580,17 +555,14 @@ jobs:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 
      - name: Create Allure report
-        if: success() || failure()
-        uses: ./.github/actions/allure-report
-        with:
-          action: generate
-          build_type: ${{ env.BUILD_TYPE }}
+        if: ${{ !cancelled() }}
+        uses: ./.github/actions/allure-report-generate
 
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D" # dev-staging-stream
-          slack-message: "Periodic TPC-H perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+          slack-message: "Periodic User example perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

1011  .github/workflows/build_and_test.yml  (vendored)
File diff suppressed because it is too large.
52  .github/workflows/neon_extra_builds.yml  (vendored)
@@ -4,6 +4,7 @@ on:
  push:
    branches:
      - main
+  pull_request:
 
defaults:
  run:
@@ -11,7 +12,7 @@ defaults:
 
concurrency:
  # Allow only one workflow per any non-`main` branch.
-  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
+  group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
  cancel-in-progress: true
 
env:
@@ -20,6 +21,7 @@ env:
 
jobs:
  check-macos-build:
+    if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos')
    timeout-minutes: 90
    runs-on: macos-latest
 
@@ -51,14 +53,14 @@ jobs:
        uses: actions/cache@v3
        with:
          path: pg_install/v14
-          key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
+          key: v1-${{ runner.os }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
 
      - name: Cache postgres v15 build
        id: cache_pg_15
        uses: actions/cache@v3
        with:
          path: pg_install/v15
-          key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
+          key: v1-${{ runner.os }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
 
      - name: Set extra env for macOS
        run: |
@@ -93,11 +95,16 @@ jobs:
        run: ./run_clippy.sh
 
  gather-rust-build-stats:
-    timeout-minutes: 90
-    runs-on: ubuntu-latest
+    if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats')
+    runs-on: [ self-hosted, gen3, large ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
+      options: --init
 
    env:
      BUILD_TYPE: release
+      # remove the cachepot wrapper and build without crate caches
+      RUSTC_WRAPPER: ""
      # build with incremental compilation produce partial results
      # so do not attempt to cache this build, also disable the incremental compilation
      CARGO_INCREMENTAL: 0
@@ -109,11 +116,6 @@ jobs:
          submodules: true
          fetch-depth: 1
 
-      - name: Install Ubuntu postgres dependencies
-        run: |
-          sudo apt update
-          sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev libssl-dev protobuf-compiler
-
      # Some of our rust modules use FFI and need those to be checked
      - name: Get postgres headers
        run: make postgres-headers -j$(nproc)
@@ -122,7 +124,31 @@ jobs:
        run: cargo build --all --release --timings
 
      - name: Upload the build stats
-        uses: actions/upload-artifact@v3
-        with:
-          name: neon-${{ runner.os }}-release-build-stats
-          path: ./target/cargo-timings/
+        id: upload-stats
+        env:
+          BUCKET: neon-github-public-dev
+          SHA: ${{ github.event.pull_request.head.sha || github.sha }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
+        run: |
+          REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
+          aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"
+          echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
+
+      - name: Publish build stats report
+        uses: actions/github-script@v6
+        env:
+          REPORT_URL: ${{ steps.upload-stats.outputs.report-url }}
+          SHA: ${{ github.event.pull_request.head.sha || github.sha }}
+        with:
+          script: |
+            const { REPORT_URL, SHA } = process.env
+
+            await github.rest.repos.createCommitStatus({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              sha: `${SHA}`,
+              state: 'success',
+              target_url: `${REPORT_URL}`,
+              context: `Build stats (release)`,
+            })
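For context on the new "Publish build stats report" step above: it uploads cargo-timing.html to S3 and then attaches the report URL to the commit through the GitHub statuses API via actions/github-script. A rough shell equivalent of that status call, useful for local debugging, is sketched below; OWNER, REPO, SHA, REPORT_URL and the token are placeholders, not values taken from this diff:

    # Sketch: the same commit status via the GitHub REST API with curl.
    # OWNER, REPO, SHA, REPORT_URL and GITHUB_TOKEN are placeholders to fill in.
    curl -sS -X POST \
      -H "Authorization: Bearer ${GITHUB_TOKEN}" \
      -H "Accept: application/vnd.github+json" \
      "https://api.github.com/repos/${OWNER}/${REPO}/statuses/${SHA}" \
      -d "{\"state\":\"success\",\"target_url\":\"${REPORT_URL}\",\"context\":\"Build stats (release)\"}"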
2  .github/workflows/pg_clients.yml  (vendored)
@@ -14,7 +14,7 @@ on:
 
concurrency:
  # Allow only one workflow per any non-`main` branch.
-  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
+  group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
  cancel-in-progress: true
 
jobs:
33  .github/workflows/release.yml  (vendored, new file)
@@ -0,0 +1,33 @@
+name: Create Release Branch
+
+on:
+  schedule:
+    - cron: '0 10 * * 2'
+
+jobs:
+  create_release_branch:
+    runs-on: [ubuntu-latest]
+
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v3
+        with:
+          ref: main
+
+      - name: Get current date
+        id: date
+        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+      - name: Create release branch
+        run: git checkout -b releases/${{ steps.date.outputs.date }}
+
+      - name: Push new branch
+        run: git push origin releases/${{ steps.date.outputs.date }}
+
+      - name: Create pull request into release
+        uses: thomaseizinger/create-pull-request@e3972219c86a56550fb70708d96800d8e24ba862 # 1.3.0
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          head: releases/${{ steps.date.outputs.date }}
+          base: release
+          title: Release ${{ steps.date.outputs.date }}
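The new workflow above boils down to cutting a dated branch from main every Tuesday and opening a pull request into release. The git portion can be reproduced by hand (a sketch; it assumes the remote is called origin and borrows the releases/YYYY-MM-DD naming from the workflow):

    # Sketch: cut a dated release branch from main, as the scheduled workflow does.
    git checkout main && git pull origin main
    branch="releases/$(date +%Y-%m-%d)"
    git checkout -b "$branch"
    git push origin "$branch"
    # The pull request into the 'release' branch is then opened via the GitHub UI or API.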
1  .gitignore  (vendored)
@@ -1,5 +1,6 @@
 /pg_install
 /target
+/alek_ext/target
 /tmp_check
 /tmp_check_cli
 __pycache__/
4  .neon_clippy_args  (new file)
@@ -0,0 +1,4 @@
+# * `-A unknown_lints` – do not warn about unknown lint suppressions
+#   that people with newer toolchains might use
+# * `-D warnings` - fail on any warnings (`cargo` returns non-zero exit status)
+export CLIPPY_COMMON_ARGS="--locked --workspace --all-targets -- -A unknown_lints -D warnings"
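The new .neon_clippy_args file only exports a shell variable, so any wrapper (such as the run_clippy.sh referenced elsewhere in this diff) can presumably consume it by sourcing the file first. A minimal usage sketch, assuming it is run from the repository root:

    # Sketch: consume .neon_clippy_args the way a wrapper script might.
    . ./.neon_clippy_args
    # CLIPPY_COMMON_ARGS already contains the `--` separator, so it is expanded as-is.
    cargo clippy $CLIPPY_COMMON_ARGS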
@@ -2,7 +2,7 @@
 
 Howdy! Usual good software engineering practices apply. Write
 tests. Write comments. Follow standard Rust coding practices where
-possible. Use 'cargo fmt' and 'clippy' to tidy up formatting.
+possible. Use `cargo fmt` and `cargo clippy` to tidy up formatting.
 
 There are soft spots in the code, which could use cleanup,
 refactoring, additional comments, and so forth. Let's try to raise the
2174  Cargo.lock  (generated)
File diff suppressed because it is too large.

96  Cargo.toml
@@ -1,13 +1,29 @@
 [workspace]
 members = [
+    "alek_ext",
     "compute_tools",
     "control_plane",
     "pageserver",
+    "pageserver/ctl",
     "proxy",
     "safekeeper",
     "storage_broker",
     "workspace_hack",
-    "libs/*",
+    "trace",
+    "libs/compute_api",
+    "libs/pageserver_api",
+    "libs/postgres_ffi",
+    "libs/safekeeper_api",
+    "libs/utils",
+    "libs/consumption_metrics",
+    "libs/postgres_backend",
+    "libs/pq_proto",
+    "libs/tenant_size_model",
+    "libs/metrics",
+    "libs/postgres_connection",
+    "libs/remote_storage",
+    "libs/tracing-utils",
+    "libs/postgres_ffi/wal_craft",
 ]
 
 [workspace.package]
@@ -20,23 +36,27 @@ anyhow = { version = "1.0", features = ["backtrace"] }
 async-stream = "0.3"
 async-trait = "0.1"
 atty = "0.2.14"
-aws-config = { version = "0.51.0", default-features = false, features=["rustls"] }
-aws-sdk-s3 = "0.21.0"
-aws-smithy-http = "0.51.0"
-aws-types = "0.51.0"
+aws-config = { version = "0.55", default-features = false, features=["rustls"] }
+aws-sdk-s3 = "0.27"
+aws-smithy-http = "0.55"
+aws-credential-types = "0.55"
+aws-types = "0.55"
 base64 = "0.13.0"
 bincode = "1.3"
-bindgen = "0.61"
+bindgen = "0.65"
 bstr = "1.0"
 byteorder = "1.4"
 bytes = "1.0"
 chrono = { version = "0.4", default-features = false, features = ["clock"] }
-clap = "4.0"
+clap = { version = "4.0", features = ["derive"] }
 close_fds = "0.3.2"
 comfy-table = "6.1"
 const_format = "0.2"
 crc32c = "0.6"
 crossbeam-utils = "0.8.5"
+either = "1.8"
+enum-map = "2.4.2"
+enumset = "1.0.12"
 fail = "0.5.0"
 fs2 = "0.4.3"
 futures = "0.3"
@@ -44,8 +64,9 @@ futures-core = "0.3"
 futures-util = "0.3"
 git-version = "0.3"
 hashbrown = "0.13"
+hashlink = "0.8.1"
 hex = "0.4"
-hex-literal = "0.3"
+hex-literal = "0.4"
 hmac = "0.12.1"
 hostname = "0.3.1"
 humantime = "2.1"
@@ -57,10 +78,15 @@ jsonwebtoken = "8"
 libc = "0.2"
 md5 = "0.7.0"
 memoffset = "0.8"
+native-tls = "0.2"
 nix = "0.26"
 notify = "5.0.0"
+num_cpus = "1.15"
 num-traits = "0.2.15"
 once_cell = "1.13"
+opentelemetry = "0.18.0"
+opentelemetry-otlp = { version = "0.11.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
+opentelemetry-semantic-conventions = "0.10.0"
 parking_lot = "0.12"
 pin-project-lite = "0.2"
 prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency
@@ -68,56 +94,69 @@ prost = "0.11"
 rand = "0.8"
 regex = "1.4"
 reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
+reqwest-tracing = { version = "0.4.0", features = ["opentelemetry_0_18"] }
+reqwest-middleware = "0.2.0"
 routerify = "3"
-rpds = "0.12.0"
+rpds = "0.13"
 rustls = "0.20"
 rustls-pemfile = "1"
 rustls-split = "0.3"
 scopeguard = "1.1"
-sentry = { version = "0.29", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
+sentry = { version = "0.30", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1"
 serde_with = "2.0"
 sha2 = "0.10.2"
 signal-hook = "0.3"
-socket2 = "0.4.4"
+socket2 = "0.5"
 strum = "0.24"
 strum_macros = "0.24"
 svg_fmt = "0.4.1"
+sync_wrapper = "0.1.2"
 tar = "0.4"
+test-context = "0.1"
 thiserror = "1.0"
 tls-listener = { version = "0.6", features = ["rustls", "hyper-h1"] }
 tokio = { version = "1.17", features = ["macros"] }
+tokio-io-timeout = "1.2.0"
 tokio-postgres-rustls = "0.9.0"
 tokio-rustls = "0.23"
 tokio-stream = "0.1"
 tokio-util = { version = "0.7", features = ["io"] }
-toml = "0.5"
-toml_edit = { version = "0.17", features = ["easy"] }
-tonic = {version = "0.8", features = ["tls", "tls-roots"]}
+toml = "0.7"
+toml_edit = "0.19"
+tonic = {version = "0.9", features = ["tls", "tls-roots"]}
 tracing = "0.1"
+tracing-error = "0.2.0"
+tracing-opentelemetry = "0.18.0"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
 url = "2.2"
 uuid = { version = "1.2", features = ["v4", "serde"] }
 walkdir = "2.3.2"
-webpki-roots = "0.22.5"
-x509-parser = "0.14"
+webpki-roots = "0.23"
+x509-parser = "0.15"
 
 ## TODO replace this with tracing
 env_logger = "0.10"
 log = "0.4"
 
 ## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed
-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
-postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
-postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
-tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
+postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="2e9b5f1ddc481d1a98fa79f6b9378ac4f170b7c9" }
+postgres-native-tls = { git = "https://github.com/neondatabase/rust-postgres.git", rev="2e9b5f1ddc481d1a98fa79f6b9378ac4f170b7c9" }
+postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="2e9b5f1ddc481d1a98fa79f6b9378ac4f170b7c9" }
+postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="2e9b5f1ddc481d1a98fa79f6b9378ac4f170b7c9" }
+tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="2e9b5f1ddc481d1a98fa79f6b9378ac4f170b7c9" }
 tokio-tar = { git = "https://github.com/neondatabase/tokio-tar.git", rev="404df61437de0feef49ba2ccdbdd94eb8ad6e142" }
 
+## Other git libraries
+heapless = { default-features=false, features=[], git = "https://github.com/japaric/heapless.git", rev = "644653bf3b831c6bb4963be2de24804acf5e5001" } # upstream release pending
+
 ## Local libraries
+compute_api = { version = "0.1", path = "./libs/compute_api/" }
 consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
 metrics = { version = "0.1", path = "./libs/metrics/" }
 pageserver_api = { version = "0.1", path = "./libs/pageserver_api/" }
+postgres_backend = { version = "0.1", path = "./libs/postgres_backend/" }
 postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" }
 postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" }
 pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
@@ -125,6 +164,7 @@ remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
 safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
 storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
 tenant_size_model = { version = "0.1", path = "./libs/tenant_size_model/" }
+tracing-utils = { version = "0.1", path = "./libs/tracing-utils/" }
 utils = { version = "0.1", path = "./libs/utils/" }
 
 ## Common library dependency
@@ -133,14 +173,20 @@ workspace_hack = { version = "0.1", path = "./workspace_hack/" }
 ## Build dependencies
 criterion = "0.4"
 rcgen = "0.10"
-rstest = "0.16"
-tempfile = "3.2"
-tonic-build = "0.8"
+rstest = "0.17"
+tempfile = "3.4"
+tonic-build = "0.9"
 
+[patch.crates-io]
+
 # This is only needed for proxy's tests.
 # TODO: we should probably fork `tokio-postgres-rustls` instead.
-[patch.crates-io]
-tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
+tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="2e9b5f1ddc481d1a98fa79f6b9378ac4f170b7c9" }
+
+# Changes the MAX_THREADS limit from 4096 to 32768.
+# This is a temporary workaround for using tracing from many threads in safekeepers code,
+# until async safekeepers patch is merged to the main.
+sharded-slab = { git = "https://github.com/neondatabase/sharded-slab.git", rev="98d16753ab01c61f0a028de44167307a00efea00" }
 
 ################# Binary contents sections
 
Dockerfile
17
Dockerfile
@@ -2,7 +2,7 @@
|
|||||||
### The image itself is mainly used as a container for the binaries and for starting e2e tests with custom parameters.
|
### The image itself is mainly used as a container for the binaries and for starting e2e tests with custom parameters.
|
||||||
### By default, the binaries inside the image have some mock parameters and can start, but are not intended to be used
|
### By default, the binaries inside the image have some mock parameters and can start, but are not intended to be used
|
||||||
### inside this image in the real deployments.
|
### inside this image in the real deployments.
|
||||||
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
ARG REPOSITORY=neondatabase
|
||||||
ARG IMAGE=rust
|
ARG IMAGE=rust
|
||||||
ARG TAG=pinned
|
ARG TAG=pinned
|
||||||
|
|
||||||
@@ -39,12 +39,19 @@ ARG CACHEPOT_BUCKET=neon-github-dev
|
|||||||
|
|
||||||
COPY --from=pg-build /home/nonroot/pg_install/v14/include/postgresql/server pg_install/v14/include/postgresql/server
|
COPY --from=pg-build /home/nonroot/pg_install/v14/include/postgresql/server pg_install/v14/include/postgresql/server
|
||||||
COPY --from=pg-build /home/nonroot/pg_install/v15/include/postgresql/server pg_install/v15/include/postgresql/server
|
COPY --from=pg-build /home/nonroot/pg_install/v15/include/postgresql/server pg_install/v15/include/postgresql/server
|
||||||
COPY . .
|
COPY --chown=nonroot . .
|
||||||
|
|
||||||
# Show build caching stats to check if it was used in the end.
|
# Show build caching stats to check if it was used in the end.
|
||||||
# Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
|
# Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
|
||||||
RUN set -e \
|
RUN set -e \
|
||||||
&& mold -run cargo build --bin pageserver --bin pageserver_binutils --bin draw_timeline_dir --bin safekeeper --bin storage_broker --bin proxy --locked --release \
|
&& mold -run cargo build \
|
||||||
|
--bin pg_sni_router \
|
||||||
|
--bin pageserver \
|
||||||
|
--bin pagectl \
|
||||||
|
--bin safekeeper \
|
||||||
|
--bin storage_broker \
|
||||||
|
--bin proxy \
|
||||||
|
--locked --release \
|
||||||
&& cachepot -s
|
&& cachepot -s
|
||||||
|
|
||||||
# Build final image
|
# Build final image
|
||||||
@@ -63,9 +70,9 @@ RUN set -e \
     && useradd -d /data neon \
     && chown -R neon:neon /data
 
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/pg_sni_router /usr/local/bin
 COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver /usr/local/bin
-COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver_binutils /usr/local/bin
-COPY --from=build --chown=neon:neon /home/nonroot/target/release/draw_timeline_dir /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/pagectl /usr/local/bin
 COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
 COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_broker /usr/local/bin
 COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin
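With the Dockerfile changes above, the build stage now also produces pg_sni_router and pagectl, and the final stage copies them from /home/nonroot/target/release. Building the image locally follows the usual docker build pattern; the sketch below only makes the ARGs declared at the top of the Dockerfile explicit, and the image tag is an arbitrary placeholder:

    # Sketch: build the storage/proxy image from the repository root with the default ARGs made explicit.
    docker build \
      --build-arg REPOSITORY=neondatabase \
      --build-arg IMAGE=rust \
      --build-arg TAG=pinned \
      -t neon-local:dev \
      -f Dockerfile .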
663
Dockerfile.compute-node
Normal file
663
Dockerfile.compute-node
Normal file
@@ -0,0 +1,663 @@
ARG PG_VERSION
ARG REPOSITORY=neondatabase
ARG IMAGE=rust
ARG TAG=pinned

#########################################################################################
#
# Layer "build-deps"
#
#########################################################################################
FROM debian:bullseye-slim AS build-deps
RUN apt update && \
apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev \
libicu-dev libxslt1-dev liblz4-dev libzstd-dev

#########################################################################################
#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
#########################################################################################
FROM build-deps AS pg-build
ARG PG_VERSION
COPY vendor/postgres-${PG_VERSION} postgres
RUN cd postgres && \
export CONFIGURE_CMD="./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp \
--with-icu --with-libxml --with-libxslt --with-lz4" && \
if [ "${PG_VERSION}" != "v14" ]; then \
# zstd is available only from PG15
export CONFIGURE_CMD="${CONFIGURE_CMD} --with-zstd"; \
fi && \
eval $CONFIGURE_CMD && \
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
# Install headers
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \
# Enable some of contrib extensions
echo 'trusted = true' >> /usr/local/pgsql/share/extension/autoinc.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/insert_username.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/moddatetime.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_stat_statements.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/refint.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/xml2.control

#########################################################################################
#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
#########################################################################################
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install -y cmake gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
protobuf-c-compiler xsltproc

# SFCGAL > 1.3 requires CGAL > 5.2, Bullseye's libcgal-dev is 5.2
RUN wget https://gitlab.com/Oslandia/SFCGAL/-/archive/v1.3.10/SFCGAL-v1.3.10.tar.gz -O SFCGAL.tar.gz && \
echo "4e39b3b2adada6254a7bdba6d297bb28e1a9835a9f879b74f37e2dab70203232 SFCGAL.tar.gz" | sha256sum --check && \
mkdir sfcgal-src && cd sfcgal-src && tar xvzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
cmake . && make -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
make clean && cp -R /sfcgal/* /

ENV PATH "/usr/local/pgsql/bin:$PATH"

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.2.tar.gz -O postgis.tar.gz && \
echo "9a2a219da005a1730a39d1959a1c7cec619b1efb009b65be80ffc25bad299068 postgis.tar.gz" | sha256sum --check && \
mkdir postgis-src && cd postgis-src && tar xvzf ../postgis.tar.gz --strip-components=1 -C . && \
./autogen.sh && \
./configure --with-sfcgal=/usr/local/bin/sfcgal-config && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
cd extensions/postgis && \
make clean && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_sfcgal.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control

RUN wget https://github.com/pgRouting/pgrouting/archive/v3.4.2.tar.gz -O pgrouting.tar.gz && \
echo "cac297c07d34460887c4f3b522b35c470138760fe358e351ad1db4edb6ee306e pgrouting.tar.gz" | sha256sum --check && \
mkdir pgrouting-src && cd pgrouting-src && tar xvzf ../pgrouting.tar.gz --strip-components=1 -C . && \
mkdir build && \
cd build && \
cmake .. && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control

#########################################################################################
#
# Layer "plv8-build"
# Build plv8
#
#########################################################################################
FROM build-deps AS plv8-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install -y ninja-build python3-dev libncurses5 binutils clang

RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.5.tar.gz -O plv8.tar.gz && \
echo "1e108d5df639e4c189e1c5bdfa2432a521c126ca89e7e5a969d46899ca7bf106 plv8.tar.gz" | sha256sum --check && \
mkdir plv8-src && cd plv8-src && tar xvzf ../plv8.tar.gz --strip-components=1 -C . && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
rm -rf /plv8-* && \
find /usr/local/pgsql/ -name "plv8-*.so" | xargs strip && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plcoffee.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plls.control

#########################################################################################
#
# Layer "h3-pg-build"
# Build h3_pg
#
#########################################################################################
FROM build-deps AS h3-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

# packaged cmake is too old
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
-q -O /tmp/cmake-install.sh \
&& echo "739d372726cb23129d57a539ce1432453448816e345e1545f6127296926b6754 /tmp/cmake-install.sh" | sha256sum --check \
&& chmod u+x /tmp/cmake-install.sh \
&& /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
&& rm /tmp/cmake-install.sh

RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz && \
echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \
mkdir h3-src && cd h3-src && tar xvzf ../h3.tar.gz --strip-components=1 -C . && \
mkdir build && cd build && \
cmake .. -DCMAKE_BUILD_TYPE=Release && \
make -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/h3 make install && \
cp -R /h3/usr / && \
rm -rf build

RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.1.2.tar.gz -O h3-pg.tar.gz && \
echo "c135aa45999b2ad1326d2537c1cadef96d52660838e4ca371706c08fdea1a956 h3-pg.tar.gz" | sha256sum --check && \
mkdir h3-pg-src && cd h3-pg-src && tar xvzf ../h3-pg.tar.gz --strip-components=1 -C . && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control

#########################################################################################
#
# Layer "unit-pg-build"
# compile unit extension
#
#########################################################################################
FROM build-deps AS unit-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz -O postgresql-unit.tar.gz && \
echo "411d05beeb97e5a4abf17572bfcfbb5a68d98d1018918feff995f6ee3bb03e79 postgresql-unit.tar.gz" | sha256sum --check && \
mkdir postgresql-unit-src && cd postgresql-unit-src && tar xvzf ../postgresql-unit.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
# unit extension's "create extension" script relies on absolute install path to fill some reference tables.
# We move the extension from '/usr/local/pgsql/' to '/usr/local/' after it is built, so we need to adjust the path.
# This one-liner removes the pgsql/ part of the path.
# NOTE: Other extensions that rely on the MODULEDIR variable after the build phase will need the same fix.
find /usr/local/pgsql/share/extension/ -name "unit*.sql" -print0 | xargs -0 sed -i "s|pgsql/||g" && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/unit.control

#########################################################################################
#
# Layer "vector-pg-build"
# compile pgvector extension
#
#########################################################################################
FROM build-deps AS vector-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.4.0.tar.gz -O pgvector.tar.gz && \
echo "b76cf84ddad452cc880a6c8c661d137ddd8679c000a16332f4f03ecf6e10bcc8 pgvector.tar.gz" | sha256sum --check && \
mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/vector.control

#########################################################################################
#
# Layer "pgjwt-pg-build"
# compile pgjwt extension
#
#########################################################################################
FROM build-deps AS pgjwt-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

# 9742dab1b2f297ad3811120db7b21451bca2d3c9 made on 13/11/2021
RUN wget https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz -O pgjwt.tar.gz && \
echo "cfdefb15007286f67d3d45510f04a6a7a495004be5b3aecb12cda667e774203f pgjwt.tar.gz" | sha256sum --check && \
mkdir pgjwt-src && cd pgjwt-src && tar xvzf ../pgjwt.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgjwt.control

#########################################################################################
#
# Layer "hypopg-pg-build"
# compile hypopg extension
#
#########################################################################################
FROM build-deps AS hypopg-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/HypoPG/hypopg/archive/refs/tags/1.3.1.tar.gz -O hypopg.tar.gz && \
echo "e7f01ee0259dc1713f318a108f987663d60f3041948c2ada57a94b469565ca8e hypopg.tar.gz" | sha256sum --check && \
mkdir hypopg-src && cd hypopg-src && tar xvzf ../hypopg.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/hypopg.control

#########################################################################################
#
# Layer "pg-hashids-pg-build"
# compile pg_hashids extension
#
#########################################################################################
FROM build-deps AS pg-hashids-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/iCyberon/pg_hashids/archive/refs/tags/v1.2.1.tar.gz -O pg_hashids.tar.gz && \
echo "74576b992d9277c92196dd8d816baa2cc2d8046fe102f3dcd7f3c3febed6822a pg_hashids.tar.gz" | sha256sum --check && \
mkdir pg_hashids-src && cd pg_hashids-src && tar xvzf ../pg_hashids.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_hashids.control

#########################################################################################
#
# Layer "rum-pg-build"
# compile rum extension
#
#########################################################################################
FROM build-deps AS rum-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/postgrespro/rum/archive/refs/tags/1.3.13.tar.gz -O rum.tar.gz && \
echo "6ab370532c965568df6210bd844ac6ba649f53055e48243525b0b7e5c4d69a7d rum.tar.gz" | sha256sum --check && \
mkdir rum-src && cd rum-src && tar xvzf ../rum.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/rum.control

#########################################################################################
#
# Layer "pgtap-pg-build"
# compile pgTAP extension
#
#########################################################################################
FROM build-deps AS pgtap-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/theory/pgtap/archive/refs/tags/v1.2.0.tar.gz -O pgtap.tar.gz && \
echo "9c7c3de67ea41638e14f06da5da57bac6f5bd03fea05c165a0ec862205a5c052 pgtap.tar.gz" | sha256sum --check && \
mkdir pgtap-src && cd pgtap-src && tar xvzf ../pgtap.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgtap.control

#########################################################################################
#
# Layer "ip4r-pg-build"
# compile ip4r extension
#
#########################################################################################
FROM build-deps AS ip4r-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/RhodiumToad/ip4r/archive/refs/tags/2.4.1.tar.gz -O ip4r.tar.gz && \
echo "78b9f0c1ae45c22182768fe892a32d533c82281035e10914111400bf6301c726 ip4r.tar.gz" | sha256sum --check && \
mkdir ip4r-src && cd ip4r-src && tar xvzf ../ip4r.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/ip4r.control

#########################################################################################
#
# Layer "prefix-pg-build"
# compile Prefix extension
#
#########################################################################################
FROM build-deps AS prefix-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/dimitri/prefix/archive/refs/tags/v1.2.9.tar.gz -O prefix.tar.gz && \
echo "38d30a08d0241a8bbb8e1eb8f0152b385051665a8e621c8899e7c5068f8b511e prefix.tar.gz" | sha256sum --check && \
mkdir prefix-src && cd prefix-src && tar xvzf ../prefix.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/prefix.control

#########################################################################################
#
# Layer "hll-pg-build"
# compile hll extension
#
#########################################################################################
FROM build-deps AS hll-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/citusdata/postgresql-hll/archive/refs/tags/v2.17.tar.gz -O hll.tar.gz && \
echo "9a18288e884f197196b0d29b9f178ba595b0dfc21fbf7a8699380e77fa04c1e9 hll.tar.gz" | sha256sum --check && \
mkdir hll-src && cd hll-src && tar xvzf ../hll.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/hll.control

#########################################################################################
#
# Layer "plpgsql-check-pg-build"
# compile plpgsql_check extension
#
#########################################################################################
FROM build-deps AS plpgsql-check-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/okbob/plpgsql_check/archive/refs/tags/v2.3.2.tar.gz -O plpgsql_check.tar.gz && \
echo "9d81167c4bbeb74eebf7d60147b21961506161addc2aee537f95ad8efeae427b plpgsql_check.tar.gz" | sha256sum --check && \
mkdir plpgsql_check-src && cd plpgsql_check-src && tar xvzf ../plpgsql_check.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plpgsql_check.control

#########################################################################################
#
# Layer "timescaledb-pg-build"
# compile timescaledb extension
#
#########################################################################################
FROM build-deps AS timescaledb-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

ENV PATH "/usr/local/pgsql/bin:$PATH"

RUN apt-get update && \
apt-get install -y cmake && \
wget https://github.com/timescale/timescaledb/archive/refs/tags/2.10.1.tar.gz -O timescaledb.tar.gz && \
echo "6fca72a6ed0f6d32d2b3523951ede73dc5f9b0077b38450a029a5f411fdb8c73 timescaledb.tar.gz" | sha256sum --check && \
mkdir timescaledb-src && cd timescaledb-src && tar xvzf ../timescaledb.tar.gz --strip-components=1 -C . && \
./bootstrap -DSEND_TELEMETRY_DEFAULT:BOOL=OFF -DUSE_TELEMETRY:BOOL=OFF -DAPACHE_ONLY:BOOL=ON && \
cd build && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make install -j $(getconf _NPROCESSORS_ONLN) && \
echo "trusted = true" >> /usr/local/pgsql/share/extension/timescaledb.control

#########################################################################################
#
# Layer "pg-hint-plan-pg-build"
# compile pg_hint_plan extension
#
#########################################################################################
FROM build-deps AS pg-hint-plan-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

ARG PG_VERSION
ENV PATH "/usr/local/pgsql/bin:$PATH"

RUN case "${PG_VERSION}" in \
"v14") \
export PG_HINT_PLAN_VERSION=14_1_4_1 \
export PG_HINT_PLAN_CHECKSUM=c3501becf70ead27f70626bce80ea401ceac6a77e2083ee5f3ff1f1444ec1ad1 \
;; \
"v15") \
export PG_HINT_PLAN_VERSION=15_1_5_0 \
export PG_HINT_PLAN_CHECKSUM=564cbbf4820973ffece63fbf76e3c0af62c4ab23543142c7caaa682bc48918be \
;; \
*) \
echo "Export the valid PG_HINT_PLAN_VERSION variable" && exit 1 \
;; \
esac && \
wget https://github.com/ossc-db/pg_hint_plan/archive/refs/tags/REL${PG_HINT_PLAN_VERSION}.tar.gz -O pg_hint_plan.tar.gz && \
echo "${PG_HINT_PLAN_CHECKSUM} pg_hint_plan.tar.gz" | sha256sum --check && \
mkdir pg_hint_plan-src && cd pg_hint_plan-src && tar xvzf ../pg_hint_plan.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make install -j $(getconf _NPROCESSORS_ONLN) && \
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_hint_plan.control

#########################################################################################
#
# Layer "kq-imcx-pg-build"
# compile kq_imcx extension
#
#########################################################################################
FROM build-deps AS kq-imcx-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

ENV PATH "/usr/local/pgsql/bin/:$PATH"
RUN apt-get update && \
apt-get install -y git libgtk2.0-dev libpq-dev libpam-dev libxslt-dev libkrb5-dev cmake && \
wget https://github.com/ketteq-neon/postgres-exts/archive/e0bd1a9d9313d7120c1b9c7bb15c48c0dede4c4e.tar.gz -O kq_imcx.tar.gz && \
echo "dc93a97ff32d152d32737ba7e196d9687041cda15e58ab31344c2f2de8855336 kq_imcx.tar.gz" | sha256sum --check && \
mkdir kq_imcx-src && cd kq_imcx-src && tar xvzf ../kq_imcx.tar.gz --strip-components=1 -C . && \
mkdir build && \
cd build && \
cmake .. && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/kq_imcx.control

#########################################################################################
#
# Layer "pg-cron-pg-build"
# compile pg_cron extension
#
#########################################################################################
FROM build-deps AS pg-cron-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

ENV PATH "/usr/local/pgsql/bin/:$PATH"
RUN wget https://github.com/citusdata/pg_cron/archive/refs/tags/v1.5.2.tar.gz -O pg_cron.tar.gz && \
echo "6f7f0980c03f1e2a6a747060e67bf4a303ca2a50e941e2c19daeed2b44dec744 pg_cron.tar.gz" | sha256sum --check && \
mkdir pg_cron-src && cd pg_cron-src && tar xvzf ../pg_cron.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_cron.control

#########################################################################################
#
# Layer "rust extensions"
# This layer is used to build `pgx` deps
#
#########################################################################################
FROM build-deps AS rust-extensions-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN apt-get update && \
apt-get install -y curl libclang-dev cmake && \
useradd -ms /bin/bash nonroot -b /home

ENV HOME=/home/nonroot
ENV PATH="/home/nonroot/.cargo/bin:/usr/local/pgsql/bin/:$PATH"
USER nonroot
WORKDIR /home/nonroot
ARG PG_VERSION

RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && \
chmod +x rustup-init && \
./rustup-init -y --no-modify-path --profile minimal --default-toolchain stable && \
rm rustup-init && \
cargo install --locked --version 0.7.3 cargo-pgx && \
/bin/bash -c 'cargo pgx init --pg${PG_VERSION:1}=/usr/local/pgsql/bin/pg_config'

USER root

#########################################################################################
#
# Layer "pg-jsonschema-pg-build"
# Compile "pg_jsonschema" extension
#
#########################################################################################

FROM rust-extensions-build AS pg-jsonschema-pg-build

# caeab60d70b2fd3ae421ec66466a3abbb37b7ee6 made on 06/03/2023
# there is no release tag yet, but we need it due to the superuser fix in the control file, switch to git tag after release >= 0.1.5
RUN wget https://github.com/supabase/pg_jsonschema/archive/caeab60d70b2fd3ae421ec66466a3abbb37b7ee6.tar.gz -O pg_jsonschema.tar.gz && \
echo "54129ce2e7ee7a585648dbb4cef6d73f795d94fe72f248ac01119992518469a4 pg_jsonschema.tar.gz" | sha256sum --check && \
mkdir pg_jsonschema-src && cd pg_jsonschema-src && tar xvzf ../pg_jsonschema.tar.gz --strip-components=1 -C . && \
sed -i 's/pgx = "0.7.1"/pgx = { version = "0.7.3", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
cargo pgx install --release && \
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_jsonschema.control

#########################################################################################
#
# Layer "pg-graphql-pg-build"
# Compile "pg_graphql" extension
#
#########################################################################################

FROM rust-extensions-build AS pg-graphql-pg-build

# b4988843647450a153439be367168ed09971af85 made on 22/02/2023 (from remove-pgx-contrib-spiext branch)
# Currently pgx version bump to >= 0.7.2 causes "call to unsafe function" compilation errors in
# pgx-contrib-spiext. There is a branch that removes that dependency, so use it. It is on the
# same 1.1 version we've used before.
RUN wget https://github.com/yrashk/pg_graphql/archive/b4988843647450a153439be367168ed09971af85.tar.gz -O pg_graphql.tar.gz && \
echo "0c7b0e746441b2ec24187d0e03555faf935c2159e2839bddd14df6dafbc8c9bd pg_graphql.tar.gz" | sha256sum --check && \
mkdir pg_graphql-src && cd pg_graphql-src && tar xvzf ../pg_graphql.tar.gz --strip-components=1 -C . && \
sed -i 's/pgx = "~0.7.1"/pgx = { version = "0.7.3", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
sed -i 's/pgx-tests = "~0.7.1"/pgx-tests = "0.7.3"/g' Cargo.toml && \
cargo pgx install --release && \
# it's needed to enable the extension because it uses the untrusted C language
sed -i 's/superuser = false/superuser = true/g' /usr/local/pgsql/share/extension/pg_graphql.control && \
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_graphql.control

#########################################################################################
#
# Layer "pg-tiktoken-build"
# Compile "pg_tiktoken" extension
#
#########################################################################################

FROM rust-extensions-build AS pg-tiktoken-pg-build

# 801f84f08c6881c8aa30f405fafbf00eec386a72 made on 10/03/2023
RUN wget https://github.com/kelvich/pg_tiktoken/archive/801f84f08c6881c8aa30f405fafbf00eec386a72.tar.gz -O pg_tiktoken.tar.gz && \
echo "52f60ac800993a49aa8c609961842b611b6b1949717b69ce2ec9117117e16e4a pg_tiktoken.tar.gz" | sha256sum --check && \
mkdir pg_tiktoken-src && cd pg_tiktoken-src && tar xvzf ../pg_tiktoken.tar.gz --strip-components=1 -C . && \
cargo pgx install --release && \
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_tiktoken.control

#########################################################################################
#
# Layer "pg-pgx-ulid-build"
# Compile "pgx_ulid" extension
#
#########################################################################################

FROM rust-extensions-build AS pg-pgx-ulid-build

RUN wget https://github.com/pksunkara/pgx_ulid/archive/refs/tags/v0.1.0.tar.gz -O pgx_ulid.tar.gz && \
echo "908b7358e6f846e87db508ae5349fb56a88ee6305519074b12f3d5b0ff09f791 pgx_ulid.tar.gz" | sha256sum --check && \
mkdir pgx_ulid-src && cd pgx_ulid-src && tar xvzf ../pgx_ulid.tar.gz --strip-components=1 -C . && \
sed -i 's/pgx = "=0.7.3"/pgx = { version = "0.7.3", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
cargo pgx install --release && \
echo "trusted = true" >> /usr/local/pgsql/share/extension/ulid.control

#########################################################################################
#
# Layer "neon-pg-ext-build"
# compile neon extensions
#
#########################################################################################
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=postgis-build /sfcgal/* /
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY --from=unit-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=vector-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pgjwt-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-jsonschema-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-graphql-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-tiktoken-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=hypopg-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-hashids-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=rum-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pgtap-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=ip4r-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=prefix-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=hll-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=plpgsql-check-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=timescaledb-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-hint-plan-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=kq-imcx-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-cron-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-pgx-ulid-build /usr/local/pgsql/ /usr/local/pgsql/
COPY pgxn/ pgxn/

RUN make -j $(getconf _NPROCESSORS_ONLN) \
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
-C pgxn/neon \
-s install && \
make -j $(getconf _NPROCESSORS_ONLN) \
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
-C pgxn/neon_utils \
-s install && \
make -j $(getconf _NPROCESSORS_ONLN) \
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
-C pgxn/hnsw \
-s install

#########################################################################################
#
# Compile and run the Neon-specific `compute_ctl` binary
#
#########################################################################################
FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto

#########################################################################################
#
# Clean up postgres folder before inclusion
#
#########################################################################################
FROM neon-pg-ext-build AS postgres-cleanup-layer
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql

# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp

# Remove headers that we won't need anymore - we've completed installation of all extensions
RUN rm -r /usr/local/pgsql/include

# Remove static postgresql libraries - all compilation is finished, so we
# can now remove these files - they must be included in other binaries by now
# if they were to be used by other libraries.
RUN rm /usr/local/pgsql/lib/lib*.a

#########################################################################################
#
# Final layer
# Put it all together into the final image
#
#########################################################################################
FROM debian:bullseye-slim
# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
echo "postgres:test_console_pass" | chpasswd && \
mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
chown -R postgres:postgres /var/db/postgres && \
chmod 0750 /var/db/postgres/compute && \
echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig && \
# create folder for file cache
mkdir -p -m 777 /neon/cache

COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
# libreadline8 for psql
# libicu67, locales for collations (including ICU and plpgsql_check)
# liblz4-1 for lz4
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libsfcgal1, libproj and libprotobuf-c1 for PostGIS
# libxml2, libxslt1.1 for xml2
# libzstd1 for zstd
RUN apt update && \
apt install --no-install-recommends -y \
gdb \
locales \
libicu67 \
liblz4-1 \
libreadline8 \
libossp-uuid16 \
libgeos-c1v5 \
libgdal28 \
libproj19 \
libprotobuf-c1 \
libsfcgal1 \
libxml2 \
libxslt1.1 \
libzstd1 \
libcurl4-openssl-dev \
procps && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8

ENV LANG en_US.utf8
USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
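For context, a minimal sketch of how this parameterized Dockerfile could be built locally (the actual CI invocation is not part of this diff; the image tag below is illustrative only). PG_VERSION selects the vendored Postgres tree and must be v14 or v15, since the pg_hint_plan step exits for any other value; REPOSITORY, IMAGE and TAG pick the base image for the compute-tools stage:

# hypothetical local build of a v15 compute image
docker build \
  --build-arg PG_VERSION=v15 \
  --build-arg REPOSITORY=neondatabase \
  --build-arg IMAGE=rust \
  --build-arg TAG=pinned \
  -t neon-compute-node-v15:local \
  -f Dockerfile.compute-node .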
@@ -1,220 +0,0 @@
#
# This file is identical to the Dockerfile.compute-node-v15 file
# except for the version of Postgres that is built.
#

ARG TAG=pinned

#########################################################################################
#
# Layer "build-deps"
#
#########################################################################################
FROM debian:bullseye-slim AS build-deps
RUN apt update && \
apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev

#########################################################################################
#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
#########################################################################################
FROM build-deps AS pg-build
COPY vendor/postgres-v14 postgres
RUN cd postgres && \
./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp && \
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
# Install headers
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \
# Enable some of contrib extensions
echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control

#########################################################################################
#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
#########################################################################################
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.1.tar.gz && \
tar xvzf postgis-3.3.1.tar.gz && \
cd postgis-3.3.1 && \
./autogen.sh && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
./configure && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
cd extensions/postgis && \
make clean && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control

#########################################################################################
#
# Layer "plv8-build"
# Build plv8
#
#########################################################################################
FROM build-deps AS plv8-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5 binutils

# https://github.com/plv8/plv8/issues/475:
# v8 uses gold for linking and sets `--thread-count=4` which breaks
# gold version <= 1.35 (https://sourceware.org/bugzilla/show_bug.cgi?id=23607)
# Install newer gold version manually as debian-testing binutils version updates
# libc version, which in turn breaks other extension built against non-testing libc.
RUN wget https://ftp.gnu.org/gnu/binutils/binutils-2.38.tar.gz && \
tar xvzf binutils-2.38.tar.gz && \
cd binutils-2.38 && \
cd libiberty && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && \
cd ../bfd && ./configure && make bfdver.h && \
cd ../gold && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && make install && \
cp /usr/local/bin/ld.gold /usr/bin/gold

# Sed is used to patch for https://github.com/plv8/plv8/issues/503
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
tar xvzf v3.1.4.tar.gz && \
cd plv8-3.1.4 && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
rm -rf /plv8-* && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control

#########################################################################################
#
# Layer "h3-pg-build"
# Build h3_pg
#
#########################################################################################
FROM build-deps AS h3-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

# packaged cmake is too old
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
-q -O /tmp/cmake-install.sh \
&& chmod u+x /tmp/cmake-install.sh \
&& /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
&& rm /tmp/cmake-install.sh

RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
tar xvzf h3.tgz && \
cd h3-4.0.1 && \
mkdir build && \
cd build && \
cmake .. -DCMAKE_BUILD_TYPE=Release && \
make -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/h3 make install && \
cp -R /h3/usr / && \
rm -rf build

RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
tar xvzf h3-pg.tgz && \
cd h3-pg-4.0.1 && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control

#########################################################################################
#
# Layer "neon-pg-ext-build"
# compile neon extensions
#
#########################################################################################
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY pgxn/ pgxn/

RUN make -j $(getconf _NPROCESSORS_ONLN) \
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
-C pgxn/neon \
-s install

#########################################################################################
#
# Compile and run the Neon-specific `compute_ctl` binary
#
#########################################################################################
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto

#########################################################################################
#
# Clean up postgres folder before inclusion
#
#########################################################################################
FROM neon-pg-ext-build AS postgres-cleanup-layer
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql

# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp

# Remove headers that we won't need anymore - we've completed installation of all extensions
RUN rm -r /usr/local/pgsql/include

# Remove static postgresql libraries - all compilation is finished, so we
# can now remove these files - they must be included in other binaries by now
# if they were to be used by other libraries.
RUN rm /usr/local/pgsql/lib/lib*.a

#########################################################################################
#
# Final layer
# Put it all together into the final image
#
#########################################################################################
FROM debian:bullseye-slim
# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
echo "postgres:test_console_pass" | chpasswd && \
mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
chown -R postgres:postgres /var/db/postgres && \
chmod 0750 /var/db/postgres/compute && \
echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
# libreadline8 for psql
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
RUN apt update && \
apt install --no-install-recommends -y \
libreadline8 \
libossp-uuid16 \
libgeos-c1v5 \
libgdal28 \
libproj19 \
libprotobuf-c1 \
gdb && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
@@ -1,220 +0,0 @@
#
# This file is identical to the Dockerfile.compute-node-v14 file
# except for the version of Postgres that is built.
#

ARG TAG=pinned

#########################################################################################
#
# Layer "build-deps"
#
#########################################################################################
FROM debian:bullseye-slim AS build-deps
RUN apt update && \
apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev

#########################################################################################
#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
#########################################################################################
FROM build-deps AS pg-build
COPY vendor/postgres-v15 postgres
RUN cd postgres && \
./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp && \
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
# Install headers
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \
# Enable some of contrib extensions
echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control

#########################################################################################
#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
#########################################################################################
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.1.tar.gz && \
tar xvzf postgis-3.3.1.tar.gz && \
cd postgis-3.3.1 && \
./autogen.sh && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
./configure && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
cd extensions/postgis && \
make clean && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control

#########################################################################################
#
# Layer "plv8-build"
# Build plv8
#
#########################################################################################
FROM build-deps AS plv8-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5 binutils

# https://github.com/plv8/plv8/issues/475:
# v8 uses gold for linking and sets `--thread-count=4` which breaks
# gold version <= 1.35 (https://sourceware.org/bugzilla/show_bug.cgi?id=23607)
# Install newer gold version manually as debian-testing binutils version updates
# libc version, which in turn breaks other extension built against non-testing libc.
RUN wget https://ftp.gnu.org/gnu/binutils/binutils-2.38.tar.gz && \
tar xvzf binutils-2.38.tar.gz && \
cd binutils-2.38 && \
cd libiberty && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && \
cd ../bfd && ./configure && make bfdver.h && \
cd ../gold && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && make install && \
cp /usr/local/bin/ld.gold /usr/bin/gold

# Sed is used to patch for https://github.com/plv8/plv8/issues/503
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
tar xvzf v3.1.4.tar.gz && \
cd plv8-3.1.4 && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
rm -rf /plv8-* && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control

#########################################################################################
#
# Layer "h3-pg-build"
# Build h3_pg
#
#########################################################################################
FROM build-deps AS h3-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

# packaged cmake is too old
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
-q -O /tmp/cmake-install.sh \
&& chmod u+x /tmp/cmake-install.sh \
&& /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
&& rm /tmp/cmake-install.sh

RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
tar xvzf h3.tgz && \
cd h3-4.0.1 && \
mkdir build && \
cd build && \
cmake .. -DCMAKE_BUILD_TYPE=Release && \
make -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/h3 make install && \
cp -R /h3/usr / && \
rm -rf build

RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
tar xvzf h3-pg.tgz && \
cd h3-pg-4.0.1 && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control

#########################################################################################
#
# Layer "neon-pg-ext-build"
# compile neon extensions
#
#########################################################################################
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY pgxn/ pgxn/

RUN make -j $(getconf _NPROCESSORS_ONLN) \
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
-C pgxn/neon \
-s install

#########################################################################################
#
# Compile and run the Neon-specific `compute_ctl` binary
#
#########################################################################################
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto

#########################################################################################
#
# Clean up postgres folder before inclusion
#
#########################################################################################
FROM neon-pg-ext-build AS postgres-cleanup-layer
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql

# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp

# Remove headers that we won't need anymore - we've completed installation of all extensions
RUN rm -r /usr/local/pgsql/include

# Remove static postgresql libraries - all compilation is finished, so we
# can now remove these files - they must be included in other binaries by now
# if they were to be used by other libraries.
RUN rm /usr/local/pgsql/lib/lib*.a

#########################################################################################
#
# Final layer
# Put it all together into the final image
#
#########################################################################################
FROM debian:bullseye-slim
# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
echo "postgres:test_console_pass" | chpasswd && \
mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
chown -R postgres:postgres /var/db/postgres && \
chmod 0750 /var/db/postgres/compute && \
echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
# libreadline8 for psql
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
RUN apt update && \
apt install --no-install-recommends -y \
libreadline8 \
libossp-uuid16 \
libgeos-c1v5 \
libgdal28 \
libproj19 \
libprotobuf-c1 \
gdb && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
|
||||||
|
|
||||||
USER postgres
|
|
||||||
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
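For orientation, here is a rough sketch of how this compute image might be built and exercised locally. The Dockerfile name, image tag, and access to the referenced registry are assumptions for illustration, not something this diff specifies.

```bash
# Hypothetical local build of the compute image (file name and build args assumed).
docker build \
  --build-arg TAG=pinned \
  -t neon-compute:local \
  -f Dockerfile.compute-node .

# The image's ENTRYPOINT is compute_ctl, so arguments are passed straight through to it.
docker run --rm neon-compute:local --version
```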
@@ -1,6 +1,6 @@
 # First transient image to build compute_tools binaries
 # NB: keep in sync with rust image version in .github/workflows/build_and_test.yml
-ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
+ARG REPOSITORY=neondatabase
 ARG IMAGE=rust
 ARG TAG=pinned
 
28  Makefile
@@ -133,12 +133,34 @@ neon-pg-ext-%: postgres-%
 	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
 		-C $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \
 		-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install
+	+@echo "Compiling neon_utils $*"
+	mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-utils-$*
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
+		-C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \
+		-f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile install
+	+@echo "Compiling hnsw $*"
+	mkdir -p $(POSTGRES_INSTALL_DIR)/build/hnsw-$*
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
+		-C $(POSTGRES_INSTALL_DIR)/build/hnsw-$* \
+		-f $(ROOT_PROJECT_DIR)/pgxn/hnsw/Makefile install
 
 .PHONY: neon-pg-ext-clean-%
 neon-pg-ext-clean-%:
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon_walredo-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile clean
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon_test_utils-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile clean
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
+		-C $(POSTGRES_INSTALL_DIR)/build/neon-$* \
+		-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
+		-C $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* \
+		-f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile clean
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
+		-C $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \
+		-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile clean
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
+		-C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \
+		-f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile clean
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
+		-C $(POSTGRES_INSTALL_DIR)/build/hnsw-$* \
+		-f $(ROOT_PROJECT_DIR)/pgxn/hnsw/Makefile clean
 
 .PHONY: neon-pg-ext
 neon-pg-ext: \
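A usage sketch for the extended per-version targets; the version suffix (`v14`) is an assumption based on the `neon-pg-ext-%: postgres-%` pattern rule, not spelled out in this hunk.

```bash
# Build all Neon extensions (now including neon_utils and hnsw) against Postgres v14,
# then clean the per-extension build directories again.
make neon-pg-ext-v14 -s
make neon-pg-ext-clean-v14 -s
```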
75  README.md
@@ -1,3 +1,5 @@
+[](https://neon.tech)
+
 # Neon
 
 Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes.
@@ -15,7 +17,7 @@ The Neon storage engine consists of two major components:
 - Pageserver. Scalable storage backend for the compute nodes.
 - Safekeepers. The safekeepers form a redundant WAL service that received WAL from the compute node, and stores it durably until it has been processed by the pageserver and uploaded to cloud storage.
 
-See developer documentation in [/docs/SUMMARY.md](/docs/SUMMARY.md) for more information.
+See developer documentation in [SUMMARY.md](/docs/SUMMARY.md) for more information.
 
 ## Running local installation
 
@@ -26,14 +28,22 @@ See developer documentation in [/docs/SUMMARY.md](/docs/SUMMARY.md) for more inf
 * On Ubuntu or Debian, this set of packages should be sufficient to build the code:
 ```bash
 apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
-libssl-dev clang pkg-config libpq-dev cmake postgresql-client protobuf-compiler
+libssl-dev clang pkg-config libpq-dev cmake postgresql-client protobuf-compiler \
+libcurl4-openssl-dev
 ```
 * On Fedora, these packages are needed:
 ```bash
 dnf install flex bison readline-devel zlib-devel openssl-devel \
 libseccomp-devel perl clang cmake postgresql postgresql-contrib protobuf-compiler \
-protobuf-devel
+protobuf-devel libcurl-devel
 ```
+* On Arch based systems, these packages are needed:
+```bash
+pacman -S base-devel readline zlib libseccomp openssl clang \
+postgresql-libs cmake postgresql protobuf curl
+```
+
+Building Neon requires 3.15+ version of `protoc` (protobuf-compiler). If your distribution provides an older version, you can install a newer version from [here](https://github.com/protocolbuffers/protobuf/releases).
 
 2. [Install Rust](https://www.rust-lang.org/tools/install)
 ```
@@ -41,11 +51,14 @@ dnf install flex bison readline-devel zlib-devel openssl-devel \
 curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
 ```
 
-#### Installing dependencies on OSX (12.3.1)
+#### Installing dependencies on macOS (12.3.1)
 1. Install XCode and dependencies
 ```
 xcode-select --install
 brew install protobuf openssl flex bison
+
+# add openssl to PATH, required for ed25519 keys generation in neon_local
+echo 'export PATH="$(brew --prefix openssl)/bin:$PATH"' >> ~/.zshrc
 ```
 
 2. [Install Rust](https://www.rust-lang.org/tools/install)
@@ -83,9 +96,10 @@ cd neon
 
 # The preferred and default is to make a debug build. This will create a
 # demonstrably slower build than a release build. For a release build,
-# use "BUILD_TYPE=release make -j`nproc`"
+# use "BUILD_TYPE=release make -j`nproc` -s"
+# Remove -s for the verbose build log
 
-make -j`nproc`
+make -j`nproc` -s
 ```
 
 #### Building on OSX
@@ -99,16 +113,17 @@ cd neon
 
 # The preferred and default is to make a debug build. This will create a
 # demonstrably slower build than a release build. For a release build,
-# use "BUILD_TYPE=release make -j`sysctl -n hw.logicalcpu`"
+# use "BUILD_TYPE=release make -j`sysctl -n hw.logicalcpu` -s"
+# Remove -s for the verbose build log
 
-make -j`sysctl -n hw.logicalcpu`
+make -j`sysctl -n hw.logicalcpu` -s
 ```
 
 #### Dependency installation notes
 To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively.
 
 To run the integration tests or Python scripts (not required to use the code), install
-Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry](https://python-poetry.org/)) in the project directory.
+Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry>=1.3](https://python-poetry.org/)) in the project directory.
 
 
 #### Running neon database
@@ -116,11 +131,11 @@ Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (r
 ```sh
 # Create repository in .neon with proper paths to binaries and data
 # Later that would be responsibility of a package install script
-> ./target/debug/neon_local init
+> cargo neon init
 Starting pageserver at '127.0.0.1:64000' in '.neon'.
 
 # start pageserver, safekeeper, and broker for their intercommunication
-> ./target/debug/neon_local start
+> cargo neon start
 Starting neon broker at 127.0.0.1:50051
 storage_broker started, pid: 2918372
 Starting pageserver at '127.0.0.1:64000' in '.neon'.
@@ -129,21 +144,21 @@ Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'.
 safekeeper 1 started, pid: 2918437
 
 # create initial tenant and use it as a default for every future neon_local invocation
-> ./target/debug/neon_local tenant create --set-default
+> cargo neon tenant create --set-default
 tenant 9ef87a5bf0d92544f6fafeeb3239695c successfully created on the pageserver
 Created an initial timeline 'de200bd42b49cc1814412c7e592dd6e9' at Lsn 0/16B5A50 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c
 Setting tenant 9ef87a5bf0d92544f6fafeeb3239695c as a default one
 
 # start postgres compute node
-> ./target/debug/neon_local pg start main
-Starting new postgres (v14) main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
+> cargo neon endpoint start main
+Starting new endpoint main (PostgreSQL v14) on timeline de200bd42b49cc1814412c7e592dd6e9 ...
 Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/main port=55432
-Starting postgres node at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres'
+Starting postgres at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres'
 
 # check list of running postgres instances
-> ./target/debug/neon_local pg list
-NODE ADDRESS TIMELINE BRANCH NAME LSN STATUS
+> cargo neon endpoint list
+ENDPOINT ADDRESS TIMELINE BRANCH NAME LSN STATUS
 main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16B5BA8 running
 ```
 
 2. Now, it is possible to connect to postgres and run some queries:
@@ -163,23 +178,23 @@ postgres=# select * from t;
 3. And create branches and run postgres on them:
 ```sh
 # create branch named migration_check
-> ./target/debug/neon_local timeline branch --branch-name migration_check
+> cargo neon timeline branch --branch-name migration_check
 Created timeline 'b3b863fa45fa9e57e615f9f2d944e601' at Lsn 0/16F9A00 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c. Ancestor timeline: 'main'
 
 # check branches tree
-> ./target/debug/neon_local timeline list
+> cargo neon timeline list
 (L) main [de200bd42b49cc1814412c7e592dd6e9]
 (L) ┗━ @0/16F9A00: migration_check [b3b863fa45fa9e57e615f9f2d944e601]
 
 # start postgres on that branch
-> ./target/debug/neon_local pg start migration_check --branch-name migration_check
-Starting new postgres migration_check on timeline b3b863fa45fa9e57e615f9f2d944e601 ...
+> cargo neon endpoint start migration_check --branch-name migration_check
+Starting new endpoint migration_check (PostgreSQL v14) on timeline b3b863fa45fa9e57e615f9f2d944e601 ...
 Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/migration_check port=55433
-Starting postgres node at 'host=127.0.0.1 port=55433 user=cloud_admin dbname=postgres'
+Starting postgres at 'host=127.0.0.1 port=55433 user=cloud_admin dbname=postgres'
 
 # check the new list of running postgres instances
-> ./target/debug/neon_local pg list
-NODE ADDRESS TIMELINE BRANCH NAME LSN STATUS
+> cargo neon endpoint list
+ENDPOINT ADDRESS TIMELINE BRANCH NAME LSN STATUS
 main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16F9A38 running
 migration_check 127.0.0.1:55433 b3b863fa45fa9e57e615f9f2d944e601 migration_check 0/16F9A70 running
 
@@ -207,7 +222,7 @@ postgres=# select * from t;
 4. If you want to run tests afterward (see below), you must stop all the running of the pageserver, safekeeper, and postgres instances
    you have just started. You can terminate them all with one command:
 ```sh
-> ./target/debug/neon_local stop
+> cargo neon stop
 ```
 
 ## Running tests
@@ -224,9 +239,9 @@ CARGO_BUILD_FLAGS="--features=testing" make
 
 ## Documentation
 
-[/docs/](/docs/) Contains a top-level overview of all available markdown documentation.
+[docs](/docs) Contains a top-level overview of all available markdown documentation.
 
-- [/docs/sourcetree.md](/docs/sourcetree.md) contains overview of source tree layout.
+- [sourcetree.md](/docs/sourcetree.md) contains overview of source tree layout.
 
 To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open`
 
@@ -251,6 +266,6 @@ To get more familiar with this aspect, refer to:
 
 ## Join the development
 
-- Read `CONTRIBUTING.md` to learn about project code style and practices.
-- To get familiar with a source tree layout, use [/docs/sourcetree.md](/docs/sourcetree.md).
+- Read [CONTRIBUTING.md](/CONTRIBUTING.md) to learn about project code style and practices.
+- To get familiar with a source tree layout, use [sourcetree.md](/docs/sourcetree.md).
 - To learn more about PostgreSQL internals, check http://www.interdb.jp/pg/index.html
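The switch from `./target/debug/neon_local ...` to `cargo neon ...` in the examples above reads as a cargo alias for the same binary; assuming an alias along the lines of `neon = "run --bin neon_local --"` in `.cargo/config.toml`, the two forms are interchangeable:

```bash
# These two invocations are expected to be equivalent (alias definition assumed, not shown in this diff).
cargo neon timeline list
cargo run --bin neon_local -- timeline list
```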
3210  alek_ext/Cargo.lock (generated, new file; diff suppressed because it is too large)
21  alek_ext/Cargo.toml (new file)
@@ -0,0 +1,21 @@
[package]
name = "alek_ext"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
anyhow = "1.0.71"
aws-config = { version = "0.55", default-features = false, features=["rustls"] }
aws-sdk-s3 = "0.27"
aws-smithy-http = "0.55"
aws-credential-types = "0.55"
aws-types = "0.55"
remote_storage = { version = "0.1", path = "../libs/remote_storage/" }
tokio = "1.28.2"
toml_edit = "0.19.10"
tracing = "0.1.37"
tracing-subscriber = "0.3.17"

[workspace]
6  alek_ext/fuzzystrmatch.control (new file)
@@ -0,0 +1,6 @@
# fuzzystrmatch extension
comment = 'determine similarities and distance between strings'
default_version = '1.2'
module_pathname = '$libdir/fuzzystrmatch'
relocatable = true
trusted = true
5  alek_ext/pg_cron.control (new file)
@@ -0,0 +1,5 @@
comment = 'Job scheduler for PostgreSQL'
default_version = '1.5'
module_pathname = '$libdir/pg_cron'
relocatable = false
schema = pg_catalog
33  alek_ext/src/awsmwe_v1.rs (new file)
@@ -0,0 +1,33 @@
/*
 * This is a MWE of using the aws-sdk-s3 to download a file from an S3 bucket
 * */

use aws_sdk_s3::{self, config::Region, Error};
use aws_config::{self, meta::region::RegionProviderChain};


#[tokio::main]
async fn main() -> Result<(), Error> {
    let region_provider = RegionProviderChain::first_try(Region::new("eu-central-1"))
        .or_default_provider()
        .or_else(Region::new("eu-central-1"));

    let shared_config = aws_config::from_env().region(region_provider).load().await;
    let client = aws_sdk_s3::Client::new(&shared_config);

    let bucket_name = "neon-dev-extensions";
    let object_key = "fuzzystrmatch.control";
    let response = client
        .get_object()
        .bucket(bucket_name)
        .key(object_key)
        .send()
        .await?;

    let stuff = response.body;
    let data = stuff.collect().await.expect("error reading data").to_vec();
    println!("data: {:?}", std::str::from_utf8(&data));

    Ok(())
}
52  alek_ext/src/download_with_remote_api_v2.rs (new file)
@@ -0,0 +1,52 @@
/* This is a MWE of using our RemoteStorage API to call the aws stuff and download a file
 *
 */

use remote_storage::*;
use std::path::Path;
use std::fs::File;
use std::io::{BufWriter, Write};
use toml_edit;
use anyhow;
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let from_path = "fuzzystrmatch.control";
    let remote_from_path = RemotePath::new(Path::new(from_path))?;
    println!("{:?}", remote_from_path.clone());

    // read configurations from `pageserver.toml`
    let cfg_file_path = Path::new("./../.neon/pageserver.toml");
    let cfg_file_contents = std::fs::read_to_string(cfg_file_path).unwrap();
    let toml = cfg_file_contents
        .parse::<toml_edit::Document>()
        .expect("Error parsing toml");
    let remote_storage_data = toml.get("remote_storage")
        .expect("field should be present");
    let remote_storage_config = RemoteStorageConfig::from_toml(remote_storage_data)
        .expect("error parsing toml")
        .expect("error parsing toml");

    // query S3 bucket
    let remote_storage = GenericRemoteStorage::from_config(&remote_storage_config)?;
    let from_path = "fuzzystrmatch.control";
    let remote_from_path = RemotePath::new(Path::new(from_path))?;

    println!("{:?}", remote_from_path.clone());
    // if let GenericRemoteStorage::AwsS3(mybucket) = remote_storage {
    //     println!("{:?}", mybucket.relative_path_to_s3_object(&remote_from_path));
    // }
    let mut data = remote_storage.download(&remote_from_path).await.expect("data yay");
    let mut write_data_buffer = Vec::new();
    data.download_stream.read_to_end(&mut write_data_buffer).await?;
    let f = File::create("alek.out").expect("problem creating file");
    let mut f = BufWriter::new(f);
    f.write_all(&mut write_data_buffer).expect("error writing data");

    // let stuff = response.body;
    // let data = stuff.collect().await.expect("error reading data").to_vec();
    // println!("data: {:?}", std::str::from_utf8(&data));

    Ok(())
}
53  alek_ext/src/localfiledownload_v0.rs (new file)
@@ -0,0 +1,53 @@
/*
 * This is a MWE of "downloading" a local file from a fake local bucket
 * */

use remote_storage::*;
use std::path::Path;
use std::fs::File;
use std::io::{BufWriter, Write};
use toml_edit;
use anyhow;
use tokio::io::AsyncReadExt;

async fn download_file() -> anyhow::Result<()> {
    // read configurations from `pageserver.toml`
    let cfg_file_path = Path::new("./../.neon/pageserver.toml");
    let cfg_file_contents = std::fs::read_to_string(cfg_file_path).unwrap();
    let toml = cfg_file_contents
        .parse::<toml_edit::Document>()
        .expect("Error parsing toml");
    let remote_storage_data = toml.get("remote_storage")
        .expect("field should be present");
    let remote_storage_config = RemoteStorageConfig::from_toml(remote_storage_data)
        .expect("error parsing toml")
        .expect("error parsing toml");

    // query S3 bucket
    let remote_storage = GenericRemoteStorage::from_config(&remote_storage_config)?;
    let from_path = "neon-dev-extensions/fuzzystrmatch.control";
    let remote_from_path = RemotePath::new(Path::new(from_path))?;

    println!("im fine");
    println!("{:?}", remote_storage_config);

    let mut data = remote_storage.download(&remote_from_path).await.expect("data yay");
    let mut write_data_buffer = Vec::new();

    data.download_stream.read_to_end(&mut write_data_buffer).await?;

    // write `data` to a file locally
    let f = File::create("alek.out").expect("problem creating file");
    let mut f = BufWriter::new(f);
    f.write_all(&mut write_data_buffer).expect("error writing data");

    Ok(())
}

#[tokio::main]
async fn main() {
    match download_file().await {
        Err(_) => println!("Err"),
        _ => println!("SUCEECESS")
    }
}
53  alek_ext/src/main.rs (new file)
@@ -0,0 +1,53 @@
/*
 * This is a MWE of using the RemoteStorage API to list and download files from aws
 */
macro_rules! alek { ($expression:expr) => { println!("{:?}", $expression); }; }

use remote_storage::*;
use std::path::Path;
use std::fs::File;
use std::io::{BufWriter, Write};
use toml_edit;
use anyhow::{self, Context};
use tokio::io::AsyncReadExt;
use tracing::*;
use tracing_subscriber;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let subscriber = tracing_subscriber::FmtSubscriber::new();
    tracing::subscriber::set_global_default(subscriber)?;
    // TODO: read configs from a different place!
    let cfg_file_path = Path::new("./../.neon/pageserver.toml");
    let cfg_file_contents = std::fs::read_to_string(cfg_file_path)
        .with_context(|| format!("Failed to read pageserver config at '{}'", cfg_file_path.display()))?;
    let toml = cfg_file_contents
        .parse::<toml_edit::Document>()
        .with_context(|| format!("Failed to parse '{}' as pageserver config", cfg_file_path.display()))?;
    let remote_storage_data = toml.get("remote_storage")
        .context("field should be present")?;
    let remote_storage_config = RemoteStorageConfig::from_toml(remote_storage_data)?
        .context("error configuring remote storage")?;
    let remote_storage = GenericRemoteStorage::from_config(&remote_storage_config)?;

    let folder = RemotePath::new(Path::new("public_extensions"))?;
    // lists all the files in the public_extensions folder
    let from_paths = remote_storage.list_files(Some(&folder)).await?;
    alek!(from_paths);
    for remote_from_path in from_paths {
        if remote_from_path.extension() == Some("control") {
            let file_name = remote_from_path.object_name().expect("it must exist");
            info!("{:?}", file_name);
            alek!(&remote_from_path);
            // download the file
            let mut download = remote_storage.download(&remote_from_path).await?;
            // write the file to a local location
            let mut write_data_buffer = Vec::new();
            download.download_stream.read_to_end(&mut write_data_buffer).await?;
            let mut output_file = BufWriter::new(File::create(file_name)?);
            output_file.write_all(&mut write_data_buffer)?;
        }
    }

    Ok(())
}
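A possible way to run this MWE, inferred from the hard-coded `./../.neon/pageserver.toml` path in the sources; the working directory and the presence of a `remote_storage` section in that config are assumptions.

```bash
# Run the listing/download MWE from inside the alek_ext crate so the relative
# config path ./../.neon/pageserver.toml resolves (assumed layout).
cd alek_ext
cargo run
```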
65  alek_ext/src/old_multiple_files.rs (new file)
@@ -0,0 +1,65 @@
/*
**WIP**
 * This is a MWE of using our RemoteStorage API to call the aws stuff and download multiple files
 */

#![allow(unused_imports)]
use remote_storage::*;
use std::path::Path;
use std::fs::File;
use std::io::{BufWriter, Write};
use toml_edit;
use anyhow::{self, Context};
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    /* me trying to hack RemotePath into submission */

    let cfg_file_path = Path::new("./../.neon/pageserver.toml");
    let cfg_file_contents = std::fs::read_to_string(cfg_file_path)
        .expect("couldn't find pageserver.toml; make sure you are in neon/alek_ext");
    let toml = cfg_file_contents
        .parse::<toml_edit::Document>()
        .expect("Error parsing toml");
    let remote_storage_data = toml.get("remote_storage")
        .expect("field should be present");
    let remote_storage_config = RemoteStorageConfig::from_toml(remote_storage_data)
        .expect("error parsing toml")
        .expect("error parsing toml");
    let remote_storage = GenericRemoteStorage::from_config(&remote_storage_config)?;

    if let GenericRemoteStorage::AwsS3(mybucket) = remote_storage {
        let resp = mybucket
            .client
            .list_objects_v2()
            .bucket("neon-dev-extensions")
            .set_prefix(Some("public_extensions".to_string()))
            .delimiter("/".to_string())
            .send().await?;

        let z = resp.common_prefixes.unwrap();
        for yy in z {
            println!("plzplz: {:?}", yy);
        }
        let mut i = 0;
        for remote_from_path in from_paths {
            i += 1;
            println!("{:?}", &remote_from_path);
            if remote_from_path.extension() == Some("control") {
                let mut data = remote_storage.download(&remote_from_path).await?;
                // write `data` to a file locally
                // TODO: I think that the way I'm doing this is not optimal;
                // It should be possible to write the data directly to a file
                // rather than first writing it to a vector...
                let mut write_data_buffer = Vec::new();
                data.download_stream.read_to_end(&mut write_data_buffer).await?;
                let f = File::create("alek{i}.out").expect("problem creating file");
                let mut f = BufWriter::new(f);
                f.write_all(&mut write_data_buffer).expect("error writing data");
            }
        }
    }

    Ok(())
}
@@ -11,15 +11,24 @@ clap.workspace = true
 futures.workspace = true
 hyper = { workspace = true, features = ["full"] }
 notify.workspace = true
+num_cpus.workspace = true
+opentelemetry.workspace = true
 postgres.workspace = true
 regex.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 tar.workspace = true
+reqwest = { workspace = true, features = ["json"] }
 tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
 tokio-postgres.workspace = true
 tracing.workspace = true
+tracing-opentelemetry.workspace = true
 tracing-subscriber.workspace = true
+tracing-utils.workspace = true
 url.workspace = true
 
+compute_api.workspace = true
+utils.workspace = true
 workspace_hack.workspace = true
+remote_storage = { version = "0.1", path = "../libs/remote_storage/" }
+toml_edit.workspace = true
|||||||
@@ -30,96 +30,211 @@
|
|||||||
//! -b /usr/local/bin/postgres
|
//! -b /usr/local/bin/postgres
|
||||||
//! ```
|
//! ```
|
||||||
//!
|
//!
|
||||||
|
use std::collections::HashMap;
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
use std::panic;
|
use std::panic;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::process::exit;
|
use std::process::exit;
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{mpsc, Arc, Condvar, Mutex};
|
||||||
use std::{thread, time::Duration};
|
use std::{thread, time::Duration};
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use clap::Arg;
|
use clap::Arg;
|
||||||
use tracing::{error, info};
|
use tracing::{error, info};
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
use compute_tools::compute::{ComputeMetrics, ComputeNode, ComputeState, ComputeStatus};
|
use compute_api::responses::ComputeStatus;
|
||||||
|
|
||||||
|
use compute_tools::compute::{ComputeNode, ComputeState, ParsedSpec};
|
||||||
|
use compute_tools::configurator::launch_configurator;
|
||||||
use compute_tools::http::api::launch_http_server;
|
use compute_tools::http::api::launch_http_server;
|
||||||
use compute_tools::informant::spawn_vm_informant_if_present;
|
|
||||||
use compute_tools::logger::*;
|
use compute_tools::logger::*;
|
||||||
use compute_tools::monitor::launch_monitor;
|
use compute_tools::monitor::launch_monitor;
|
||||||
use compute_tools::params::*;
|
use compute_tools::params::*;
|
||||||
use compute_tools::pg_helpers::*;
|
|
||||||
use compute_tools::spec::*;
|
use compute_tools::spec::*;
|
||||||
use url::Url;
|
use compute_tools::extensions::*;
|
||||||
|
|
||||||
fn main() -> Result<()> {
|
#[tokio::main]
|
||||||
init_logger(DEFAULT_LOG_LEVEL)?;
|
async fn main() -> Result<()> {
|
||||||
|
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
||||||
|
|
||||||
let matches = cli().get_matches();
|
let matches = cli().get_matches();
|
||||||
|
let config = get_s3_config(&matches)
|
||||||
|
.expect("Hopefully get_s3_config works");
|
||||||
|
download_extension(&config, ExtensionType::Shared)
|
||||||
|
.await
|
||||||
|
.expect("Assume downloads can't error.");
|
||||||
|
// let mut file = File::create("alek.txt")?;
|
||||||
|
// file.write_all(b"success?")?;
|
||||||
|
|
||||||
|
let http_port = *matches
|
||||||
|
.get_one::<u16>("http-port")
|
||||||
|
.expect("http-port is required");
|
||||||
let pgdata = matches
|
let pgdata = matches
|
||||||
.get_one::<String>("pgdata")
|
.get_one::<String>("pgdata")
|
||||||
.expect("PGDATA path is required");
|
.expect("PGDATA path is required");
|
||||||
let connstr = matches
|
let connstr = matches
|
||||||
.get_one::<String>("connstr")
|
.get_one::<String>("connstr")
|
||||||
.expect("Postgres connection string is required");
|
.expect("Postgres connection string is required");
|
||||||
let spec = matches.get_one::<String>("spec");
|
let spec_json = matches.get_one::<String>("spec");
|
||||||
let spec_path = matches.get_one::<String>("spec-path");
|
let spec_path = matches.get_one::<String>("spec-path");
|
||||||
|
|
||||||
|
// Extract OpenTelemetry context for the startup actions from the
|
||||||
|
// TRACEPARENT and TRACESTATE env variables, and attach it to the current
|
||||||
|
// tracing context.
|
||||||
|
//
|
||||||
|
// This is used to propagate the context for the 'start_compute' operation
|
||||||
|
// from the neon control plane. This allows linking together the wider
|
||||||
|
// 'start_compute' operation that creates the compute container, with the
|
||||||
|
// startup actions here within the container.
|
||||||
|
//
|
||||||
|
// There is no standard for passing context in env variables, but a lot of
|
||||||
|
// tools use TRACEPARENT/TRACESTATE, so we use that convention too. See
|
||||||
|
// https://github.com/open-telemetry/opentelemetry-specification/issues/740
|
||||||
|
//
|
||||||
|
// Switch to the startup context here, and exit it once the startup has
|
||||||
|
// completed and Postgres is up and running.
|
||||||
|
//
|
||||||
|
// If this pod is pre-created without binding it to any particular endpoint
|
||||||
|
// yet, this isn't the right place to enter the startup context. In that
|
||||||
|
// case, the control plane should pass the tracing context as part of the
|
||||||
|
// /configure API call.
|
||||||
|
//
|
||||||
|
// NOTE: This is supposed to only cover the *startup* actions. Once
|
||||||
|
// postgres is configured and up-and-running, we exit this span. Any other
|
||||||
|
// actions that are performed on incoming HTTP requests, for example, are
|
||||||
|
// performed in separate spans.
|
||||||
|
//
|
||||||
|
// XXX: If the pod is restarted, we perform the startup actions in the same
|
||||||
|
// context as the original startup actions, which probably doesn't make
|
||||||
|
// sense.
|
||||||
|
let mut startup_tracing_carrier: HashMap<String, String> = HashMap::new();
|
||||||
|
if let Ok(val) = std::env::var("TRACEPARENT") {
|
||||||
|
startup_tracing_carrier.insert("traceparent".to_string(), val);
|
||||||
|
}
|
||||||
|
if let Ok(val) = std::env::var("TRACESTATE") {
|
||||||
|
startup_tracing_carrier.insert("tracestate".to_string(), val);
|
||||||
|
}
|
||||||
|
let startup_context_guard = if !startup_tracing_carrier.is_empty() {
|
||||||
|
use opentelemetry::propagation::TextMapPropagator;
|
||||||
|
use opentelemetry::sdk::propagation::TraceContextPropagator;
|
||||||
|
let guard = TraceContextPropagator::new()
|
||||||
|
.extract(&startup_tracing_carrier)
|
||||||
|
.attach();
|
||||||
|
info!("startup tracing context attached");
|
||||||
|
Some(guard)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let compute_id = matches.get_one::<String>("compute-id");
|
||||||
|
let control_plane_uri = matches.get_one::<String>("control-plane-uri");
|
||||||
|
|
||||||
// Try to use just 'postgres' if no path is provided
|
// Try to use just 'postgres' if no path is provided
|
||||||
let pgbin = matches.get_one::<String>("pgbin").unwrap();
|
let pgbin = matches.get_one::<String>("pgbin").unwrap();
|
||||||
|
|
||||||
let spec: ComputeSpec = match spec {
|
let spec;
|
||||||
|
let mut live_config_allowed = false;
|
||||||
|
match spec_json {
|
||||||
// First, try to get cluster spec from the cli argument
|
// First, try to get cluster spec from the cli argument
|
||||||
Some(json) => serde_json::from_str(json)?,
|
Some(json) => {
|
||||||
|
spec = Some(serde_json::from_str(json)?);
|
||||||
|
}
|
||||||
None => {
|
None => {
|
||||||
// Second, try to read it from the file if path is provided
|
// Second, try to read it from the file if path is provided
|
||||||
if let Some(sp) = spec_path {
|
if let Some(sp) = spec_path {
|
||||||
let path = Path::new(sp);
|
let path = Path::new(sp);
|
||||||
let file = File::open(path)?;
|
let file = File::open(path)?;
|
||||||
serde_json::from_reader(file)?
|
spec = Some(serde_json::from_reader(file)?);
|
||||||
|
} else if let Some(id) = compute_id {
|
||||||
|
if let Some(cp_base) = control_plane_uri {
|
||||||
|
live_config_allowed = true;
|
||||||
|
spec = match get_spec_from_control_plane(cp_base, id) {
|
||||||
|
Ok(s) => s,
|
||||||
|
Err(e) => {
|
||||||
|
error!("cannot get response from control plane: {}", e);
|
||||||
|
panic!("neither spec nor confirmation that compute is in the Empty state was received");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
panic!("must specify both --control-plane-uri and --compute-id or none");
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
panic!("cluster spec should be provided via --spec or --spec-path argument");
|
panic!(
|
||||||
|
"compute spec should be provided by one of the following ways: \
|
||||||
|
--spec OR --spec-path OR --control-plane-uri and --compute-id"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let pageserver_connstr = spec
|
let mut new_state = ComputeState::new();
|
||||||
.cluster
|
let spec_set;
|
||||||
.settings
|
if let Some(spec) = spec {
|
||||||
.find("neon.pageserver_connstring")
|
let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
|
||||||
.expect("pageserver connstr should be provided");
|
new_state.pspec = Some(pspec);
|
||||||
let tenant = spec
|
spec_set = true;
|
||||||
.cluster
|
} else {
|
||||||
.settings
|
spec_set = false;
|
||||||
.find("neon.tenant_id")
|
}
|
||||||
.expect("tenant id should be provided");
|
let compute_node = ComputeNode {
|
||||||
let timeline = spec
|
|
||||||
.cluster
|
|
||||||
.settings
|
|
||||||
.find("neon.timeline_id")
|
|
||||||
.expect("tenant id should be provided");
|
|
||||||
|
|
||||||
let compute_state = ComputeNode {
|
|
||||||
start_time: Utc::now(),
|
|
||||||
connstr: Url::parse(connstr).context("cannot parse connstr as a URL")?,
|
connstr: Url::parse(connstr).context("cannot parse connstr as a URL")?,
|
||||||
pgdata: pgdata.to_string(),
|
pgdata: pgdata.to_string(),
|
||||||
pgbin: pgbin.to_string(),
|
pgbin: pgbin.to_string(),
|
||||||
spec,
|
live_config_allowed,
|
||||||
tenant,
|
state: Mutex::new(new_state),
|
||||||
timeline,
|
state_changed: Condvar::new(),
|
||||||
pageserver_connstr,
|
|
||||||
metrics: ComputeMetrics::default(),
|
|
||||||
state: RwLock::new(ComputeState::new()),
|
|
||||||
};
|
};
|
||||||
let compute = Arc::new(compute_state);
|
let compute = Arc::new(compute_node);
|
||||||
|
|
||||||
// Launch service threads first, so we were able to serve availability
|
// Launch http service first, so we were able to serve control-plane
|
||||||
// requests, while configuration is still in progress.
|
// requests, while configuration is still in progress.
|
||||||
let _http_handle = launch_http_server(&compute).expect("cannot launch http endpoint thread");
|
let _http_handle =
|
||||||
|
launch_http_server(http_port, &compute).expect("cannot launch http endpoint thread");
|
||||||
|
|
||||||
|
if !spec_set {
|
||||||
|
// No spec provided, hang waiting for it.
|
||||||
|
info!("no compute spec provided, waiting");
|
||||||
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
while state.status != ComputeStatus::ConfigurationPending {
|
||||||
|
state = compute.state_changed.wait(state).unwrap();
|
||||||
|
|
||||||
|
if state.status == ComputeStatus::ConfigurationPending {
|
||||||
|
info!("got spec, continue configuration");
|
||||||
|
// Spec is already set by the http server handler.
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We got all we need, update the state.
|
||||||
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
|
||||||
|
// Now we have the spec, and also the tenant id, so we can download the user's personal extensions
|
||||||
|
// download_extension(&config, ExtensionType::Tenant(FIXME tenant_id.into()));
|
||||||
|
|
||||||
|
// Record for how long we slept waiting for the spec.
|
||||||
|
state.metrics.wait_for_spec_ms = Utc::now()
|
||||||
|
.signed_duration_since(state.start_time)
|
||||||
|
.to_std()
|
||||||
|
.unwrap()
|
||||||
|
.as_millis() as u64;
|
||||||
|
// Reset start time to the actual start of the configuration, so that
|
||||||
|
// total startup time was properly measured at the end.
|
||||||
|
state.start_time = Utc::now();
|
||||||
|
|
||||||
|
state.status = ComputeStatus::Init;
|
||||||
|
compute.state_changed.notify_all();
|
||||||
|
drop(state);
|
||||||
|
|
||||||
|
// Launch remaining service threads
|
||||||
let _monitor_handle = launch_monitor(&compute).expect("cannot launch compute monitor thread");
|
let _monitor_handle = launch_monitor(&compute).expect("cannot launch compute monitor thread");
|
||||||
// Also spawn the thread responsible for handling the VM informant -- if it's present
|
let _configurator_handle =
|
||||||
let _vm_informant_handle = spawn_vm_informant_if_present().expect("cannot launch VM informant");
|
launch_configurator(&compute).expect("cannot launch configurator thread");
|
||||||
|
|
||||||
|
// Now we are ready to download library extensions
|
||||||
|
// download_extension(&config, ExtensionType::Library(FIXME library_name.into()));
|
||||||
|
|
||||||
// Start Postgres
|
// Start Postgres
|
||||||
let mut delay_exit = false;
|
let mut delay_exit = false;
|
||||||
@@ -128,7 +243,7 @@ fn main() -> Result<()> {
|
|||||||
Ok(pg) => Some(pg),
|
Ok(pg) => Some(pg),
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
error!("could not start the compute node: {:?}", err);
|
error!("could not start the compute node: {:?}", err);
|
||||||
let mut state = compute.state.write().unwrap();
|
let mut state = compute.state.lock().unwrap();
|
||||||
state.error = Some(format!("{:?}", err));
|
state.error = Some(format!("{:?}", err));
|
||||||
state.status = ComputeStatus::Failed;
|
state.status = ComputeStatus::Failed;
|
||||||
drop(state);
|
drop(state);
|
||||||
@@ -140,6 +255,9 @@ fn main() -> Result<()> {
|
|||||||
// Wait for the child Postgres process forever. In this state Ctrl+C will
|
// Wait for the child Postgres process forever. In this state Ctrl+C will
|
||||||
// propagate to Postgres and it will be shut down as well.
|
// propagate to Postgres and it will be shut down as well.
|
||||||
if let Some(mut pg) = pg {
|
if let Some(mut pg) = pg {
|
||||||
|
// Startup is finished, exit the startup tracing span
|
||||||
|
drop(startup_context_guard);
|
||||||
|
|
||||||
let ecode = pg
|
let ecode = pg
|
||||||
.wait()
|
.wait()
|
||||||
.expect("failed to start waiting on Postgres process");
|
.expect("failed to start waiting on Postgres process");
|
||||||
@@ -156,9 +274,29 @@ fn main() -> Result<()> {
|
|||||||
if delay_exit {
|
if delay_exit {
|
||||||
info!("giving control plane 30s to collect the error before shutdown");
|
info!("giving control plane 30s to collect the error before shutdown");
|
||||||
thread::sleep(Duration::from_secs(30));
|
thread::sleep(Duration::from_secs(30));
|
||||||
info!("shutting down");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Shutdown trace pipeline gracefully, so that it has a chance to send any
|
||||||
|
// pending traces before we exit. Shutting down OTEL tracing provider may
|
||||||
|
// hang for quite some time, see, for example:
|
||||||
|
// - https://github.com/open-telemetry/opentelemetry-rust/issues/868
|
||||||
|
// - and our problems with staging https://github.com/neondatabase/cloud/issues/3707#issuecomment-1493983636
|
||||||
|
//
|
||||||
|
// Yet, we want computes to shut down fast enough, as we may need a new one
|
||||||
|
// for the same timeline ASAP. So wait no longer than 2s for the shutdown to
|
||||||
|
// complete, then just error out and exit the main thread.
|
||||||
|
info!("shutting down tracing");
|
||||||
|
let (sender, receiver) = mpsc::channel();
|
||||||
|
let _ = thread::spawn(move || {
|
||||||
|
tracing_utils::shutdown_tracing();
|
||||||
|
sender.send(()).ok()
|
||||||
|
});
|
||||||
|
let shutdown_res = receiver.recv_timeout(Duration::from_millis(2000));
|
||||||
|
if shutdown_res.is_err() {
|
||||||
|
error!("timed out while shutting down tracing, exiting anyway");
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("shutting down");
|
||||||
exit(exit_code.unwrap_or(1))
|
exit(exit_code.unwrap_or(1))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -167,6 +305,14 @@ fn cli() -> clap::Command {
|
|||||||
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
|
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
|
||||||
clap::Command::new("compute_ctl")
|
clap::Command::new("compute_ctl")
|
||||||
.version(version)
|
.version(version)
|
||||||
|
.arg(
|
||||||
|
Arg::new("http-port")
|
||||||
|
.long("http-port")
|
||||||
|
.value_name("HTTP_PORT")
|
||||||
|
.default_value("3080")
|
||||||
|
.value_parser(clap::value_parser!(u16))
|
||||||
|
.required(false),
|
||||||
|
)
|
||||||
.arg(
|
.arg(
|
||||||
Arg::new("connstr")
|
Arg::new("connstr")
|
||||||
.short('C')
|
.short('C')
|
||||||
@@ -200,6 +346,18 @@ fn cli() -> clap::Command {
|
|||||||
.long("spec-path")
|
.long("spec-path")
|
||||||
.value_name("SPEC_PATH"),
|
.value_name("SPEC_PATH"),
|
||||||
)
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("compute-id")
|
||||||
|
.short('i')
|
||||||
|
.long("compute-id")
|
||||||
|
.value_name("COMPUTE_ID"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("control-plane-uri")
|
||||||
|
.short('p')
|
||||||
|
.long("control-plane-uri")
|
||||||
|
.value_name("CONTROL_PLANE_API_BASE_URI"),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
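For reference, a hypothetical invocation wiring together the flags touched in this diff (`--http-port` plus the existing spec and connection options). All values are placeholders, and the long spellings of the pgdata flag are assumed since this hunk only shows the argument names; `-C`, `--spec-path`, and `-b` do appear above.

```bash
# Placeholder paths and ports; compute_ctl normally runs inside the compute image.
compute_ctl --http-port 3080 \
    --pgdata /var/db/postgres/compute/pgdata \
    -C 'postgresql://cloud_admin@127.0.0.1:55432/postgres' \
    --spec-path /var/db/postgres/specs/spec.json \
    -b /usr/local/bin/postgres
```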
@@ -1,12 +1,28 @@
 use anyhow::{anyhow, Result};
-use postgres::Client;
 use tokio_postgres::NoTls;
 use tracing::{error, instrument};
 
 use crate::compute::ComputeNode;
 
+/// Update timestamp in a row in a special service table to check
+/// that we can actually write some data in this particular timeline.
+/// Create table if it's missing.
 #[instrument(skip_all)]
-pub fn create_writability_check_data(client: &mut Client) -> Result<()> {
+pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
+    // Connect to the database.
+    let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
+    if client.is_closed() {
+        return Err(anyhow!("connection to postgres closed"));
+    }
+
+    // The connection object performs the actual communication with the database,
+    // so spawn it off to run on its own.
+    tokio::spawn(async move {
+        if let Err(e) = connection.await {
+            error!("connection error: {}", e);
+        }
+    });
+
     let query = "
     CREATE TABLE IF NOT EXISTS health_check (
         id serial primary key,
@@ -15,31 +31,15 @@ pub fn create_writability_check_data(client: &mut Client) -> Result<()> {
     INSERT INTO health_check VALUES (1, now())
         ON CONFLICT (id) DO UPDATE
         SET updated_at = now();";
-    let result = client.simple_query(query)?;
-    if result.len() < 2 {
-        return Err(anyhow::format_err!("executed {} queries", result.len()));
-    }
-    Ok(())
-}
-
-#[instrument(skip_all)]
-pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
-    let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
-    if client.is_closed() {
-        return Err(anyhow!("connection to postgres closed"));
-    }
-    tokio::spawn(async move {
-        if let Err(e) = connection.await {
-            error!("connection error: {}", e);
-        }
-    });
-
-    let result = client
-        .simple_query("UPDATE health_check SET updated_at = now() WHERE id = 1;")
-        .await?;
-
-    if result.len() != 1 {
-        return Err(anyhow!("statement can't be executed"));
+    let result = client.simple_query(query).await?;
+
+    if result.len() != 2 {
+        return Err(anyhow::format_err!(
+            "expected 2 query results, but got {}",
+            result.len()
+        ));
     }
 
     Ok(())
 }
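To see what the writability check touches, one can query the `health_check` table on a running endpoint. The connection string below is the local default from the README example earlier in this diff, not something this file defines.

```bash
# Inspect the service table the checker creates and keeps updating.
psql 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres' \
  -c 'SELECT id, updated_at FROM health_check;'
```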
|||||||
@@ -1,78 +1,73 @@
-//
-// XXX: This starts to be scarry similar to the `PostgresNode` from `control_plane`,
-// but there are several things that makes `PostgresNode` usage inconvenient in the
-// cloud:
-//   - it inherits from `LocalEnv`, which contains **all-all** the information about
-//     a complete service running
-//   - it uses `PageServerNode` with information about http endpoint, which we do not
-//     need in the cloud again
-//   - many tiny pieces like, for example, we do not use `pg_ctl` in the cloud
-//
-// Thus, to use `PostgresNode` in the cloud, we need to 'mock' a bunch of required
-// attributes (not required for the cloud). Yet, it is still tempting to unify these
-// `PostgresNode` and `ComputeNode` and use one in both places.
-//
-// TODO: stabilize `ComputeNode` and think about using it in the `control_plane`.
-//
 use std::fs;
 use std::os::unix::fs::PermissionsExt;
 use std::path::Path;
 use std::process::{Command, Stdio};
-use std::sync::atomic::{AtomicU64, Ordering};
-use std::sync::RwLock;
+use std::str::FromStr;
+use std::sync::{Condvar, Mutex};
 
 use anyhow::{Context, Result};
 use chrono::{DateTime, Utc};
 use postgres::{Client, NoTls};
-use serde::{Serialize, Serializer};
+use tokio_postgres;
 use tracing::{info, instrument, warn};
+use utils::id::{TenantId, TimelineId};
+use utils::lsn::Lsn;
 
-use crate::checker::create_writability_check_data;
+use compute_api::responses::{ComputeMetrics, ComputeStatus};
+use compute_api::spec::{ComputeMode, ComputeSpec};
+
 use crate::config;
 use crate::pg_helpers::*;
 use crate::spec::*;
 
 /// Compute node info shared across several `compute_ctl` threads.
 pub struct ComputeNode {
-    pub start_time: DateTime<Utc>,
     // Url type maintains proper escaping
     pub connstr: url::Url,
     pub pgdata: String,
     pub pgbin: String,
-    pub spec: ComputeSpec,
-    pub tenant: String,
-    pub timeline: String,
-    pub pageserver_connstr: String,
-    pub metrics: ComputeMetrics,
-    /// Volatile part of the `ComputeNode` so should be used under `RwLock`
-    /// to allow HTTP API server to serve status requests, while configuration
-    /// is in progress.
-    pub state: RwLock<ComputeState>,
+    /// We should only allow live re- / configuration of the compute node if
+    /// it uses 'pull model', i.e. it can go to control-plane and fetch
+    /// the latest configuration. Otherwise, there could be a case:
+    /// - we start compute with some spec provided as argument
+    /// - we push new spec and it does reconfiguration
+    /// - but then something happens and compute pod / VM is destroyed,
+    ///   so k8s controller starts it again with the **old** spec
+    /// and the same for empty computes:
+    /// - we started compute without any spec
+    /// - we push spec and it does configuration
+    /// - but then it is restarted without any spec again
+    pub live_config_allowed: bool,
+    /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
+    /// To allow the HTTP API server to serve status requests while configuration
+    /// is in progress, the lock should be held only for short periods of time to do
+    /// a read/write, not for the whole configuration process.
+    pub state: Mutex<ComputeState>,
+    /// `Condvar` to allow notifying waiters about state changes.
+    pub state_changed: Condvar,
 }
 
-fn rfc3339_serialize<S>(x: &DateTime<Utc>, s: S) -> Result<S::Ok, S::Error>
-where
-    S: Serializer,
-{
-    x.to_rfc3339().serialize(s)
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "snake_case")]
+#[derive(Clone, Debug)]
 pub struct ComputeState {
+    pub start_time: DateTime<Utc>,
     pub status: ComputeStatus,
-    /// Timestamp of the last Postgres activity
-    #[serde(serialize_with = "rfc3339_serialize")]
-    pub last_active: DateTime<Utc>,
+    /// Timestamp of the last Postgres activity. It could be `None` if
+    /// compute wasn't used since start.
+    pub last_active: Option<DateTime<Utc>>,
    pub error: Option<String>,
+    pub pspec: Option<ParsedSpec>,
+    pub metrics: ComputeMetrics,
 }
 
 impl ComputeState {
     pub fn new() -> Self {
         Self {
-            status: ComputeStatus::Init,
-            last_active: Utc::now(),
+            start_time: Utc::now(),
+            status: ComputeStatus::Empty,
+            last_active: None,
             error: None,
+            pspec: None,
+            metrics: ComputeMetrics::default(),
         }
     }
 }
@@ -83,29 +78,70 @@ impl Default for ComputeState {
     }
 }
 
-#[derive(Serialize, Clone, Copy, PartialEq, Eq)]
-#[serde(rename_all = "snake_case")]
-pub enum ComputeStatus {
-    Init,
-    Running,
-    Failed,
+#[derive(Clone, Debug)]
+pub struct ParsedSpec {
+    pub spec: ComputeSpec,
+    pub tenant_id: TenantId,
+    pub timeline_id: TimelineId,
+    pub pageserver_connstr: String,
+    pub storage_auth_token: Option<String>,
 }
 
-#[derive(Default, Serialize)]
-pub struct ComputeMetrics {
-    pub sync_safekeepers_ms: AtomicU64,
-    pub basebackup_ms: AtomicU64,
-    pub config_ms: AtomicU64,
-    pub total_startup_ms: AtomicU64,
+impl TryFrom<ComputeSpec> for ParsedSpec {
+    type Error = String;
+    fn try_from(spec: ComputeSpec) -> Result<Self, String> {
+        // Extract the options from the spec file that are needed to connect to
+        // the storage system.
+        //
+        // For backwards-compatibility, the top-level fields in the spec file
+        // may be empty. In that case, we need to dig them from the GUCs in the
+        // cluster.settings field.
+        let pageserver_connstr = spec
+            .pageserver_connstring
+            .clone()
+            .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
+            .ok_or("pageserver connstr should be provided")?;
+        let storage_auth_token = spec.storage_auth_token.clone();
+        let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
+            tenant_id
+        } else {
+            spec.cluster
+                .settings
+                .find("neon.tenant_id")
+                .ok_or("tenant id should be provided")
+                .map(|s| TenantId::from_str(&s))?
+                .or(Err("invalid tenant id"))?
+        };
+        let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
+            timeline_id
+        } else {
+            spec.cluster
+                .settings
+                .find("neon.timeline_id")
+                .ok_or("timeline id should be provided")
+                .map(|s| TimelineId::from_str(&s))?
+                .or(Err("invalid timeline id"))?
+        };
+
+        Ok(ParsedSpec {
+            spec,
+            pageserver_connstr,
+            storage_auth_token,
+            tenant_id,
+            timeline_id,
+        })
+    }
 }
 
 impl ComputeNode {
     pub fn set_status(&self, status: ComputeStatus) {
-        self.state.write().unwrap().status = status;
+        let mut state = self.state.lock().unwrap();
+        state.status = status;
+        self.state_changed.notify_all();
     }
 
     pub fn get_status(&self) -> ComputeStatus {
-        self.state.read().unwrap().status
+        self.state.lock().unwrap().status
     }
 
     // Remove `pgdata` directory and create it again with right permissions.
@@ -121,14 +157,26 @@ impl ComputeNode {
 
     // Get basebackup from the libpq connection to pageserver using `connstr` and
     // unarchive it to `pgdata` directory overriding all its previous content.
-    #[instrument(skip(self))]
-    fn get_basebackup(&self, lsn: &str) -> Result<()> {
+    #[instrument(skip(self, compute_state))]
+    fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
+        let spec = compute_state.pspec.as_ref().expect("spec must be set");
         let start_time = Utc::now();
 
-        let mut client = Client::connect(&self.pageserver_connstr, NoTls)?;
+        let mut config = postgres::Config::from_str(&spec.pageserver_connstr)?;
+
+        // Use the storage auth token from the config file, if given.
+        // Note: this overrides any password set in the connection string.
+        if let Some(storage_auth_token) = &spec.storage_auth_token {
+            info!("Got storage auth token from spec file");
+            config.password(storage_auth_token);
+        } else {
+            info!("Storage auth token not set");
+        }
+
+        let mut client = config.connect(NoTls)?;
         let basebackup_cmd = match lsn {
-            "0/0" => format!("basebackup {} {}", &self.tenant, &self.timeline), // First start of the compute
-            _ => format!("basebackup {} {} {}", &self.tenant, &self.timeline, lsn),
+            Lsn(0) => format!("basebackup {} {}", spec.tenant_id, spec.timeline_id), // First start of the compute
+            _ => format!("basebackup {} {} {}", spec.tenant_id, spec.timeline_id, lsn),
         };
         let copyreader = client.copy_out(basebackup_cmd.as_str())?;
 
@@ -141,27 +189,28 @@ impl ComputeNode {
         ar.set_ignore_zeros(true);
         ar.unpack(&self.pgdata)?;
 
-        self.metrics.basebackup_ms.store(
-            Utc::now()
-                .signed_duration_since(start_time)
-                .to_std()
-                .unwrap()
-                .as_millis() as u64,
-            Ordering::Relaxed,
-        );
+        self.state.lock().unwrap().metrics.basebackup_ms = Utc::now()
+            .signed_duration_since(start_time)
+            .to_std()
+            .unwrap()
+            .as_millis() as u64;
 
         Ok(())
     }
 
     // Run `postgres` in a special mode with `--sync-safekeepers` argument
     // and return the reported LSN back to the caller.
-    #[instrument(skip(self))]
-    fn sync_safekeepers(&self) -> Result<String> {
+    #[instrument(skip(self, storage_auth_token))]
+    fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
         let start_time = Utc::now();
 
         let sync_handle = Command::new(&self.pgbin)
             .args(["--sync-safekeepers"])
             .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
+            .envs(if let Some(storage_auth_token) = &storage_auth_token {
+                vec![("NEON_AUTH_TOKEN", storage_auth_token)]
+            } else {
+                vec![]
+            })
             .stdout(Stdio::piped())
             .spawn()
             .expect("postgres --sync-safekeepers failed to start");
@@ -182,63 +231,92 @@ impl ComputeNode {
             );
         }
 
-        self.metrics.sync_safekeepers_ms.store(
-            Utc::now()
-                .signed_duration_since(start_time)
-                .to_std()
-                .unwrap()
-                .as_millis() as u64,
-            Ordering::Relaxed,
-        );
+        self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
+            .signed_duration_since(start_time)
+            .to_std()
+            .unwrap()
+            .as_millis() as u64;
 
-        let lsn = String::from(String::from_utf8(sync_output.stdout)?.trim());
+        let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
 
         Ok(lsn)
     }
 
     /// Do all the preparations like PGDATA directory creation, configuration,
     /// safekeepers sync, basebackup, etc.
-    #[instrument(skip(self))]
-    pub fn prepare_pgdata(&self) -> Result<()> {
-        let spec = &self.spec;
+    #[instrument(skip(self, compute_state))]
+    pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
+        let pspec = compute_state.pspec.as_ref().expect("spec must be set");
+        let spec = &pspec.spec;
         let pgdata_path = Path::new(&self.pgdata);
 
         // Remove/create an empty pgdata directory and put configuration there.
         self.create_pgdata()?;
-        config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), spec)?;
+        config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), &pspec.spec)?;
 
-        info!("starting safekeepers syncing");
-        let lsn = self
-            .sync_safekeepers()
-            .with_context(|| "failed to sync safekeepers")?;
-        info!("safekeepers synced at LSN {}", lsn);
+        // Syncing safekeepers is only safe with primary nodes: if a primary
+        // is already connected it will be kicked out, so a secondary (standby)
+        // cannot sync safekeepers.
+        let lsn = match spec.mode {
+            ComputeMode::Primary => {
+                info!("starting safekeepers syncing");
+                let lsn = self
+                    .sync_safekeepers(pspec.storage_auth_token.clone())
+                    .with_context(|| "failed to sync safekeepers")?;
+                info!("safekeepers synced at LSN {}", lsn);
+                lsn
+            }
+            ComputeMode::Static(lsn) => {
+                info!("Starting read-only node at static LSN {}", lsn);
+                lsn
+            }
+            ComputeMode::Replica => {
+                info!("Initializing standby from latest Pageserver LSN");
+                Lsn(0)
+            }
+        };
 
         info!(
             "getting basebackup@{} from pageserver {}",
-            lsn, &self.pageserver_connstr
+            lsn, &pspec.pageserver_connstr
         );
-        self.get_basebackup(&lsn).with_context(|| {
+        self.get_basebackup(compute_state, lsn).with_context(|| {
             format!(
                 "failed to get basebackup@{} from pageserver {}",
-                lsn, &self.pageserver_connstr
+                lsn, &pspec.pageserver_connstr
             )
         })?;
 
         // Update pg_hba.conf received with basebackup.
         update_pg_hba(pgdata_path)?;
 
+        match spec.mode {
+            ComputeMode::Primary => {}
+            ComputeMode::Replica | ComputeMode::Static(..) => {
+                add_standby_signal(pgdata_path)?;
+            }
+        }
+
         Ok(())
     }
 
     /// Start Postgres as a child process and manage DBs/roles.
     /// After that this will hang waiting on the postmaster process to exit.
     #[instrument(skip(self))]
-    pub fn start_postgres(&self) -> Result<std::process::Child> {
+    pub fn start_postgres(
+        &self,
+        storage_auth_token: Option<String>,
+    ) -> Result<std::process::Child> {
         let pgdata_path = Path::new(&self.pgdata);
 
         // Run postgres as a child process.
         let mut pg = Command::new(&self.pgbin)
             .args(["-D", &self.pgdata])
+            .envs(if let Some(storage_auth_token) = &storage_auth_token {
+                vec![("NEON_AUTH_TOKEN", storage_auth_token)]
+            } else {
+                vec![]
+            })
             .spawn()
             .expect("cannot start postgres process");
 
@@ -247,8 +325,9 @@ impl ComputeNode {
         Ok(pg)
     }
 
-    #[instrument(skip(self))]
-    pub fn apply_config(&self) -> Result<()> {
+    /// Do initial configuration of the already started Postgres.
+    #[instrument(skip(self, compute_state))]
+    pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
         // If connection fails,
         // it may be the old node with `zenith_admin` superuser.
         //
@@ -279,18 +358,67 @@ impl ComputeNode {
         };
 
         // Proceed with post-startup configuration. Note, that order of operations is important.
-        handle_roles(&self.spec, &mut client)?;
-        handle_databases(&self.spec, &mut client)?;
-        handle_role_deletions(self, &mut client)?;
-        handle_grants(self, &mut client)?;
-        create_writability_check_data(&mut client)?;
+        // Disable DDL forwarding because control plane already knows about these roles/databases.
+        client.simple_query("SET neon.forward_ddl = false")?;
+        let spec = &compute_state.pspec.as_ref().expect("spec must be set").spec;
+        handle_roles(spec, &mut client)?;
+        handle_databases(spec, &mut client)?;
+        handle_role_deletions(spec, self.connstr.as_str(), &mut client)?;
+        handle_grants(spec, self.connstr.as_str(), &mut client)?;
+        handle_extensions(spec, &mut client)?;
 
         // 'Close' connection
         drop(client);
 
         info!(
             "finished configuration of compute for project {}",
-            self.spec.cluster.cluster_id
+            spec.cluster.cluster_id.as_deref().unwrap_or("None")
+        );
+
+        Ok(())
+    }
+
+    // We could've wrapped this around `pg_ctl reload`, but right now we don't use
+    // `pg_ctl` for start / stop, so this just seems much easier to do as we already
+    // have opened connection to Postgres and superuser access.
+    #[instrument(skip(self, client))]
+    fn pg_reload_conf(&self, client: &mut Client) -> Result<()> {
+        client.simple_query("SELECT pg_reload_conf()")?;
+        Ok(())
+    }
+
+    /// Similar to `apply_config()`, but does a bit different sequence of operations,
+    /// as it's used to reconfigure a previously started and configured Postgres node.
+    #[instrument(skip(self))]
+    pub fn reconfigure(&self) -> Result<()> {
+        let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
+
+        // Write new config
+        let pgdata_path = Path::new(&self.pgdata);
+        config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), &spec)?;
+
+        let mut client = Client::connect(self.connstr.as_str(), NoTls)?;
+        self.pg_reload_conf(&mut client)?;
+
+        // Proceed with post-startup configuration. Note, that order of operations is important.
+        // Disable DDL forwarding because control plane already knows about these roles/databases.
+        if spec.mode == ComputeMode::Primary {
+            client.simple_query("SET neon.forward_ddl = false")?;
+            handle_roles(&spec, &mut client)?;
+            handle_databases(&spec, &mut client)?;
+            handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?;
+            handle_grants(&spec, self.connstr.as_str(), &mut client)?;
+            handle_extensions(&spec, &mut client)?;
+        }
+
+        // 'Close' connection
+        drop(client);
+
+        let unknown_op = "unknown".to_string();
+        let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
+        info!(
+            "finished reconfiguration of compute node for operation {}",
+            op_id
         );
 
         Ok(())
@@ -298,40 +426,40 @@ impl ComputeNode {
 
     #[instrument(skip(self))]
     pub fn start_compute(&self) -> Result<std::process::Child> {
+        let compute_state = self.state.lock().unwrap().clone();
+        let spec = compute_state.pspec.as_ref().expect("spec must be set");
         info!(
             "starting compute for project {}, operation {}, tenant {}, timeline {}",
-            self.spec.cluster.cluster_id,
-            self.spec.operation_uuid.as_ref().unwrap(),
-            self.tenant,
-            self.timeline,
+            spec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
+            spec.spec.operation_uuid.as_deref().unwrap_or("None"),
+            spec.tenant_id,
+            spec.timeline_id,
         );
 
-        self.prepare_pgdata()?;
+        self.prepare_pgdata(&compute_state)?;
 
         let start_time = Utc::now();
 
-        let pg = self.start_postgres()?;
+        let pg = self.start_postgres(spec.storage_auth_token.clone())?;
 
-        self.apply_config()?;
+        if spec.spec.mode == ComputeMode::Primary {
+            self.apply_config(&compute_state)?;
+        }
 
         let startup_end_time = Utc::now();
-        self.metrics.config_ms.store(
-            startup_end_time
+        {
+            let mut state = self.state.lock().unwrap();
+            state.metrics.config_ms = startup_end_time
                 .signed_duration_since(start_time)
                 .to_std()
                 .unwrap()
-                .as_millis() as u64,
-            Ordering::Relaxed,
-        );
-        self.metrics.total_startup_ms.store(
-            startup_end_time
-                .signed_duration_since(self.start_time)
+                .as_millis() as u64;
+            state.metrics.total_startup_ms = startup_end_time
+                .signed_duration_since(compute_state.start_time)
                 .to_std()
                 .unwrap()
-                .as_millis() as u64,
-            Ordering::Relaxed,
-        );
+                .as_millis() as u64;
+        }
 
         self.set_status(ComputeStatus::Running);
 
         Ok(pg)
@@ -400,4 +528,43 @@ impl ComputeNode {
 
         Ok(())
     }
+
+    /// Select `pg_stat_statements` data and return it as a stringified JSON
+    pub async fn collect_insights(&self) -> String {
+        let mut result_rows: Vec<String> = Vec::new();
+        let connect_result = tokio_postgres::connect(self.connstr.as_str(), NoTls).await;
+        let (client, connection) = connect_result.unwrap();
+        tokio::spawn(async move {
+            if let Err(e) = connection.await {
+                eprintln!("connection error: {}", e);
+            }
+        });
+        let result = client
+            .simple_query(
+                "SELECT
+                    row_to_json(pg_stat_statements)
+                FROM
+                    pg_stat_statements
+                WHERE
+                    userid != 'cloud_admin'::regrole::oid
+                ORDER BY
+                    (mean_exec_time + mean_plan_time) DESC
+                LIMIT 100",
+            )
+            .await;
+
+        if let Ok(raw_rows) = result {
+            for message in raw_rows.iter() {
+                if let postgres::SimpleQueryMessage::Row(row) = message {
+                    if let Some(json) = row.get(0) {
+                        result_rows.push(json.to_string());
+                    }
+                }
+            }
+
+            format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
+        } else {
+            "{\"pg_stat_statements\": []}".to_string()
+        }
+    }
 }

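The `ComputeNode` changes above replace the `RwLock` with a `Mutex` plus a `Condvar`, so that `set_status` can wake up threads (the configurator, blocked `/configure` requests) waiting for a status transition. A minimal, standalone illustration of that wait/notify pattern follows; it uses only the standard library, and the `Status` names are hypothetical, not taken from the repository.

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Status {
        Init,
        Running,
    }

    fn main() {
        let shared = Arc::new((Mutex::new(Status::Init), Condvar::new()));

        // Waiter: holds the lock only while checking, sleeps on the condvar otherwise.
        let waiter = {
            let shared = Arc::clone(&shared);
            thread::spawn(move || {
                let (lock, cvar) = &*shared;
                let mut status = lock.lock().unwrap();
                while *status != Status::Running {
                    status = cvar.wait(status).unwrap();
                }
                println!("observed status: {:?}", *status);
            })
        };

        // Updater: equivalent of set_status() -- change the value, then notify_all().
        {
            let (lock, cvar) = &*shared;
            *lock.lock().unwrap() = Status::Running;
            cvar.notify_all();
        }
        waiter.join().unwrap();
    }
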
@@ -5,8 +5,9 @@ use std::path::Path;
 
 use anyhow::Result;
 
+use crate::pg_helpers::escape_conf_value;
 use crate::pg_helpers::PgOptionsSerialize;
-use crate::spec::ComputeSpec;
+use compute_api::spec::{ComputeMode, ComputeSpec};
 
 /// Check that `line` is inside a text file and put it there if it is not.
 /// Create file if it doesn't exist.
@@ -34,18 +35,65 @@ pub fn line_in_file(path: &Path, line: &str) -> Result<bool> {
 /// Create or completely rewrite configuration file specified by `path`
 pub fn write_postgres_conf(path: &Path, spec: &ComputeSpec) -> Result<()> {
     // File::create() destroys the file content if it exists.
-    let mut postgres_conf = File::create(path)?;
+    let mut file = File::create(path)?;
 
-    write_auto_managed_block(&mut postgres_conf, &spec.cluster.settings.as_pg_settings())?;
-
-    Ok(())
-}
-
-// Write Postgres config block wrapped with generated comment section
-fn write_auto_managed_block(file: &mut File, buf: &str) -> Result<()> {
-    writeln!(file, "# Managed by compute_ctl: begin")?;
-    writeln!(file, "{}", buf)?;
-    writeln!(file, "# Managed by compute_ctl: end")?;
+    // Write the postgresql.conf content from the spec file as is.
+    if let Some(conf) = &spec.cluster.postgresql_conf {
+        writeln!(file, "{}", conf)?;
+    }
+
+    write!(file, "{}", &spec.cluster.settings.as_pg_settings())?;
+
+    // Add options for connecting to storage
+    writeln!(file, "# Neon storage settings")?;
+    if let Some(s) = &spec.pageserver_connstring {
+        writeln!(
+            file,
+            "neon.pageserver_connstring='{}'",
+            escape_conf_value(s)
+        )?;
+    }
+    if !spec.safekeeper_connstrings.is_empty() {
+        writeln!(
+            file,
+            "neon.safekeepers='{}'",
+            escape_conf_value(&spec.safekeeper_connstrings.join(","))
+        )?;
+    }
+    if let Some(s) = &spec.tenant_id {
+        writeln!(
+            file,
+            "neon.tenant_id='{}'",
+            escape_conf_value(&s.to_string())
+        )?;
+    }
+    if let Some(s) = &spec.timeline_id {
+        writeln!(
+            file,
+            "neon.timeline_id='{}'",
+            escape_conf_value(&s.to_string())
+        )?;
+    }
+
+    match spec.mode {
+        ComputeMode::Primary => {}
+        ComputeMode::Static(lsn) => {
+            // hot_standby is 'on' by default, but let's be explicit
+            writeln!(file, "hot_standby=on")?;
+            writeln!(file, "recovery_target_lsn='{lsn}'")?;
+        }
+        ComputeMode::Replica => {
+            // hot_standby is 'on' by default, but let's be explicit
+            writeln!(file, "hot_standby=on")?;
+        }
+    }
+
+    // If there are any extra options in the 'settings' field, append those
+    if spec.cluster.settings.is_some() {
+        writeln!(file, "# Managed by compute_ctl: begin")?;
+        write!(file, "{}", spec.cluster.settings.as_pg_settings())?;
+        writeln!(file, "# Managed by compute_ctl: end")?;
+    }
+
     Ok(())
 }

compute_tools/src/configurator.rs (new file, 54 lines)
@@ -0,0 +1,54 @@
+use std::sync::Arc;
+use std::thread;
+
+use anyhow::Result;
+use tracing::{error, info, instrument};
+
+use compute_api::responses::ComputeStatus;
+
+use crate::compute::ComputeNode;
+
+#[instrument(skip(compute))]
+fn configurator_main_loop(compute: &Arc<ComputeNode>) {
+    info!("waiting for reconfiguration requests");
+    loop {
+        let state = compute.state.lock().unwrap();
+        let mut state = compute.state_changed.wait(state).unwrap();
+
+        if state.status == ComputeStatus::ConfigurationPending {
+            info!("got configuration request");
+            state.status = ComputeStatus::Configuration;
+            compute.state_changed.notify_all();
+            drop(state);
+
+            let mut new_status = ComputeStatus::Failed;
+            if let Err(e) = compute.reconfigure() {
+                error!("could not configure compute node: {}", e);
+            } else {
+                new_status = ComputeStatus::Running;
+                info!("compute node configured");
+            }
+
+            // XXX: used to test that API is blocking
+            // std::thread::sleep(std::time::Duration::from_millis(10000));
+
+            compute.set_status(new_status);
+        } else if state.status == ComputeStatus::Failed {
+            info!("compute node is now in Failed state, exiting");
+            break;
+        } else {
+            info!("woken up for compute status: {:?}, sleeping", state.status);
+        }
+    }
+}
+
+pub fn launch_configurator(compute: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
+    let compute = Arc::clone(compute);
+
+    Ok(thread::Builder::new()
+        .name("compute-configurator".into())
+        .spawn(move || {
+            configurator_main_loop(&compute);
+            info!("configurator thread exited");
+        })?)
+}

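A rough wiring sketch of how these pieces are meant to fit together: an "empty" `ComputeNode` is created, the HTTP API and configurator threads are launched, and a control plane can then push a spec via POST /configure. The field set mirrors the `ComputeNode` struct from the diff above, but the module paths, the shape of `main()`, and the concrete connstr/pgdata/pgbin values are assumptions, not taken from the repository.

    use std::sync::{Arc, Condvar, Mutex};

    use anyhow::Result;
    // Assumed module paths within the compute_tools crate.
    use compute_tools::compute::{ComputeNode, ComputeState};
    use compute_tools::configurator::launch_configurator;
    use compute_tools::http::api::launch_http_server;

    fn main() -> Result<()> {
        // Hypothetical values; the real binary takes these from CLI arguments.
        let compute = Arc::new(ComputeNode {
            connstr: url::Url::parse("postgresql://cloud_admin@localhost:5432/postgres")?,
            pgdata: "/var/db/postgres/compute".to_string(),
            pgbin: "postgres".to_string(),
            live_config_allowed: true,
            state: Mutex::new(ComputeState::new()),
            state_changed: Condvar::new(),
        });

        // HTTP API on an assumed port, plus the configurator loop waiting for specs.
        let http_handle = launch_http_server(3080, &compute)?;
        let configurator_handle = launch_configurator(&compute)?;

        // ... wait for a spec, prepare pgdata, start Postgres, etc.
        configurator_handle.join().unwrap();
        http_handle.join().unwrap();
        Ok(())
    }
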
compute_tools/src/extensions.rs (new file, 96 lines)
@@ -0,0 +1,96 @@
+// This is some code for downloading postgres extensions from AWS s3
+use std::fs::File;
+use std::io::{BufWriter, Write};
+use std::path::Path;
+
+use anyhow::Context;
+use clap::ArgMatches;
+use remote_storage::*;
+use tokio::io::AsyncReadExt;
+use tracing::info;
+
+fn get_pg_config(argument: &str) -> String {
+    // FIXME: this function panics if it runs into any issues
+    let config_output = std::process::Command::new("pg_config")
+        .arg(argument)
+        .output()
+        .expect("pg_config should be installed");
+    assert!(config_output.status.success());
+    let stdout = std::str::from_utf8(&config_output.stdout).unwrap();
+    stdout.trim().to_string()
+}
+
+async fn download_helper(
+    remote_storage: &GenericRemoteStorage,
+    remote_from_path: &RemotePath,
+    to_path: &str,
+) -> anyhow::Result<()> {
+    let file_name = remote_from_path.object_name().expect("it must exist");
+    info!("Downloading {:?}", file_name);
+    let mut download = remote_storage.download(remote_from_path).await?;
+    let mut write_data_buffer = Vec::new();
+    download
+        .download_stream
+        .read_to_end(&mut write_data_buffer)
+        .await?;
+    let mut output_file = BufWriter::new(File::create(Path::new(to_path).join(file_name))?);
+    output_file.write_all(&write_data_buffer)?;
+    Ok(())
+}
+
+pub enum ExtensionType {
+    Shared,
+    Tenant(String),
+    Library(String),
+}
+
+pub async fn download_extension(
+    config: &RemoteStorageConfig,
+    ext_type: ExtensionType,
+) -> anyhow::Result<()> {
+    let sharedir = get_pg_config("--sharedir");
+    let sharedir = format!("{}/extension", sharedir);
+    let libdir = get_pg_config("--libdir");
+    let remote_storage = GenericRemoteStorage::from_config(config)?;
+
+    match ext_type {
+        ExtensionType::Shared => {
+            // 1. Download control files from s3-bucket/public/*.control to SHAREDIR/extension
+            // We can do this step even before we have spec,
+            // because public extensions are common for all projects.
+            let folder = RemotePath::new(Path::new("public_extensions"))?;
+            let from_paths = remote_storage.list_files(Some(&folder)).await?;
+            for remote_from_path in from_paths {
+                if remote_from_path.extension() == Some("control") {
+                    // FIXME: CAUTION: running this writes into the local Postgres
+                    // directories, and what it currently writes is not usable yet.
+                    // Don't run it for now without changing the target path.
+                    download_helper(&remote_storage, &remote_from_path, &sharedir).await?;
+                }
+            }
+        }
+        ExtensionType::Tenant(tenant_id) => {
+            // 2. After we have spec, before project start
+            // Download control files from s3-bucket/[tenant-id]/*.control to SHAREDIR/extension
+            let folder = RemotePath::new(Path::new(&tenant_id))?;
+            let from_paths = remote_storage.list_files(Some(&folder)).await?;
+            for remote_from_path in from_paths {
+                if remote_from_path.extension() == Some("control") {
+                    download_helper(&remote_storage, &remote_from_path, &sharedir).await?;
+                }
+            }
+        }
+        ExtensionType::Library(library_name) => {
+            // 3. After we have spec, before postgres start
+            // Download preload_shared_libraries from s3-bucket/public/[library-name].control into LIBDIR/
+            let from_path = format!("neon-dev-extensions/public/{library_name}.control");
+            let remote_from_path = RemotePath::new(Path::new(&from_path))?;
+            download_helper(&remote_storage, &remote_from_path, &libdir).await?;
+        }
+    }
+    Ok(())
+}
+
+pub fn get_s3_config(arg_matches: &ArgMatches) -> anyhow::Result<RemoteStorageConfig> {
+    // TODO: Right now we are using the same config parameters as pageserver; but should we have our own configs?
+    // TODO: Should we read the s3_config from CLI arguments?
+    let cfg_file_path = Path::new("./../.neon/pageserver.toml");
+    let cfg_file_contents = std::fs::read_to_string(cfg_file_path).with_context(|| {
+        format!(
+            "Failed to read pageserver config at '{}'",
+            cfg_file_path.display()
+        )
+    })?;
+    let toml = cfg_file_contents
+        .parse::<toml_edit::Document>()
+        .with_context(|| {
+            format!(
+                "Failed to parse '{}' as pageserver config",
+                cfg_file_path.display()
+            )
+        })?;
+    let remote_storage_data = toml
+        .get("remote_storage")
+        .context("field should be present")?;
+    let remote_storage_config = RemoteStorageConfig::from_toml(remote_storage_data)?
+        .context("error configuring remote storage")?;
+    Ok(remote_storage_config)
+}

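A hypothetical driver for the draft above, showing how `get_s3_config` and `download_extension` might be invoked from a small CLI. The binary name, the `--tenant-id` flag wiring, and the order of the two download steps are assumptions layered on top of the draft, not existing `compute_ctl` behaviour.

    use anyhow::Result;
    use clap::{Arg, Command};

    use compute_tools::extensions::{download_extension, get_s3_config, ExtensionType};

    #[tokio::main]
    async fn main() -> Result<()> {
        // Hypothetical CLI: a single optional --tenant-id flag.
        let matches = Command::new("download-extensions")
            .arg(Arg::new("tenant-id").long("tenant-id"))
            .get_matches();

        let config = get_s3_config(&matches)?;

        // Public control files can be fetched before any spec is known...
        download_extension(&config, ExtensionType::Shared).await?;

        // ...tenant-specific ones only once the tenant id is available.
        if let Some(tenant_id) = matches.get_one::<String>("tenant-id") {
            download_extension(&config, ExtensionType::Tenant(tenant_id.clone())).await?;
        }
        Ok(())
    }
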
@@ -3,37 +3,121 @@ use std::net::SocketAddr;
 use std::sync::Arc;
 use std::thread;
 
+use crate::compute::{ComputeNode, ComputeState, ParsedSpec};
+use compute_api::requests::ConfigurationRequest;
+use compute_api::responses::{ComputeStatus, ComputeStatusResponse, GenericAPIError};
+
 use anyhow::Result;
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Method, Request, Response, Server, StatusCode};
+use num_cpus;
 use serde_json;
+use tokio::task;
 use tracing::{error, info};
+use tracing_utils::http::OtelName;
 
-use crate::compute::ComputeNode;
+fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse {
+    ComputeStatusResponse {
+        start_time: state.start_time,
+        tenant: state
+            .pspec
+            .as_ref()
+            .map(|pspec| pspec.tenant_id.to_string()),
+        timeline: state
+            .pspec
+            .as_ref()
+            .map(|pspec| pspec.timeline_id.to_string()),
+        status: state.status,
+        last_active: state.last_active,
+        error: state.error.clone(),
+    }
+}
 
 // Service function to handle all available routes.
-async fn routes(req: Request<Body>, compute: Arc<ComputeNode>) -> Response<Body> {
+async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
+    //
+    // NOTE: The URI path is currently included in traces. That's OK because
+    // it doesn't contain any variable parts or sensitive information. But
+    // please keep that in mind if you change the routing here.
+    //
     match (req.method(), req.uri().path()) {
         // Serialized compute state.
         (&Method::GET, "/status") => {
             info!("serving /status GET request");
-            let state = compute.state.read().unwrap();
-            Response::new(Body::from(serde_json::to_string(&*state).unwrap()))
+            let state = compute.state.lock().unwrap();
+            let status_response = status_response_from_state(&state);
+            Response::new(Body::from(serde_json::to_string(&status_response).unwrap()))
         }
 
         // Startup metrics in JSON format. Keep /metrics reserved for a possible
         // future use for Prometheus metrics format.
         (&Method::GET, "/metrics.json") => {
             info!("serving /metrics.json GET request");
-            Response::new(Body::from(serde_json::to_string(&compute.metrics).unwrap()))
+            let metrics = compute.state.lock().unwrap().metrics.clone();
+            Response::new(Body::from(serde_json::to_string(&metrics).unwrap()))
+        }
+
+        // Collect Postgres current usage insights
+        (&Method::GET, "/insights") => {
+            info!("serving /insights GET request");
+            let status = compute.get_status();
+            if status != ComputeStatus::Running {
+                let msg = format!("compute is not running, current status: {:?}", status);
+                error!(msg);
+                return Response::new(Body::from(msg));
+            }
+
+            let insights = compute.collect_insights().await;
+            Response::new(Body::from(insights))
         }
 
         (&Method::POST, "/check_writability") => {
             info!("serving /check_writability POST request");
-            let res = crate::checker::check_writability(&compute).await;
+            let status = compute.get_status();
+            if status != ComputeStatus::Running {
+                let msg = format!(
+                    "invalid compute status for check_writability request: {:?}",
+                    status
+                );
+                error!(msg);
+                return Response::new(Body::from(msg));
+            }
+
+            let res = crate::checker::check_writability(compute).await;
             match res {
                 Ok(_) => Response::new(Body::from("true")),
-                Err(e) => Response::new(Body::from(e.to_string())),
+                Err(e) => {
+                    error!("check_writability failed: {}", e);
+                    Response::new(Body::from(e.to_string()))
+                }
+            }
+        }
+
+        (&Method::GET, "/info") => {
+            let num_cpus = num_cpus::get_physical();
+            info!("serving /info GET request. num_cpus: {}", num_cpus);
+            Response::new(Body::from(
+                serde_json::json!({
+                    "num_cpus": num_cpus,
+                })
+                .to_string(),
+            ))
+        }
+
+        // Accept spec in JSON format and request compute configuration. If
+        // anything goes wrong after we set the compute status to `ConfigurationPending`
+        // and update compute state with new spec, we basically leave compute
+        // in the potentially wrong state. That said, it's control-plane's
+        // responsibility to watch compute state after reconfiguration request
+        // and to clean restart in case of errors.
+        (&Method::POST, "/configure") => {
+            info!("serving /configure POST request");
+            match handle_configure_request(req, compute).await {
+                Ok(msg) => Response::new(Body::from(msg)),
+                Err((msg, code)) => {
+                    error!("error handling /configure request: {msg}");
+                    render_json_error(&msg, code)
+                }
             }
         }
 
@@ -46,17 +130,117 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
     }
 }
 
+async fn handle_configure_request(
+    req: Request<Body>,
+    compute: &Arc<ComputeNode>,
+) -> Result<String, (String, StatusCode)> {
+    if !compute.live_config_allowed {
+        return Err((
+            "live configuration is not allowed for this compute node".to_string(),
+            StatusCode::PRECONDITION_FAILED,
+        ));
+    }
+
+    let body_bytes = hyper::body::to_bytes(req.into_body()).await.unwrap();
+    let spec_raw = String::from_utf8(body_bytes.to_vec()).unwrap();
+    if let Ok(request) = serde_json::from_str::<ConfigurationRequest>(&spec_raw) {
+        let spec = request.spec;
+
+        let parsed_spec = match ParsedSpec::try_from(spec) {
+            Ok(ps) => ps,
+            Err(msg) => return Err((msg, StatusCode::PRECONDITION_FAILED)),
+        };
+
+        // XXX: wrap the state update under lock in a code block. Otherwise,
+        // we will try to `Send` `mut state` into the spawned thread
+        // below, which will cause the error:
+        // ```
+        // error: future cannot be sent between threads safely
+        // ```
+        {
+            let mut state = compute.state.lock().unwrap();
+            if state.status != ComputeStatus::Empty && state.status != ComputeStatus::Running {
+                let msg = format!(
+                    "invalid compute status for configuration request: {:?}",
+                    state.status.clone()
+                );
+                return Err((msg, StatusCode::PRECONDITION_FAILED));
+            }
+            state.pspec = Some(parsed_spec);
+            state.status = ComputeStatus::ConfigurationPending;
+            compute.state_changed.notify_all();
+            drop(state);
+            info!("set new spec and notified waiters");
+        }
+
+        // Spawn a blocking thread to wait for compute to become Running.
+        // This is needed so we do not block the main pool of workers and
+        // can serve other requests while this particular request
+        // is waiting for compute to finish configuration.
+        let c = compute.clone();
+        task::spawn_blocking(move || {
+            let mut state = c.state.lock().unwrap();
+            while state.status != ComputeStatus::Running {
+                state = c.state_changed.wait(state).unwrap();
+                info!(
+                    "waiting for compute to become Running, current status: {:?}",
+                    state.status
+                );
+
+                if state.status == ComputeStatus::Failed {
+                    let err = state.error.as_ref().map_or("unknown error", |x| x);
+                    let msg = format!("compute configuration failed: {:?}", err);
+                    return Err((msg, StatusCode::INTERNAL_SERVER_ERROR));
+                }
+            }
+
+            Ok(())
+        })
+        .await
+        .unwrap()?;
+
+        // Return current compute state if everything went well.
+        let state = compute.state.lock().unwrap().clone();
+        let status_response = status_response_from_state(&state);
+        Ok(serde_json::to_string(&status_response).unwrap())
+    } else {
+        Err(("invalid spec".to_string(), StatusCode::BAD_REQUEST))
+    }
+}
+
+fn render_json_error(e: &str, status: StatusCode) -> Response<Body> {
+    let error = GenericAPIError {
+        error: e.to_string(),
+    };
+    Response::builder()
+        .status(status)
+        .body(Body::from(serde_json::to_string(&error).unwrap()))
+        .unwrap()
+}
+
 // Main Hyper HTTP server function that runs it and blocks waiting on it forever.
 #[tokio::main]
-async fn serve(state: Arc<ComputeNode>) {
-    let addr = SocketAddr::from(([0, 0, 0, 0], 3080));
+async fn serve(port: u16, state: Arc<ComputeNode>) {
+    let addr = SocketAddr::from(([0, 0, 0, 0], port));
 
     let make_service = make_service_fn(move |_conn| {
         let state = state.clone();
         async move {
             Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
                 let state = state.clone();
-                async move { Ok::<_, Infallible>(routes(req, state).await) }
+                async move {
+                    Ok::<_, Infallible>(
+                        // NOTE: We include the URI path in the string. It
+                        // doesn't contain any variable parts or sensitive
+                        // information in this API.
+                        tracing_utils::http::tracing_handler(
+                            req,
+                            |req| routes(req, &state),
+                            OtelName::UriPath,
+                        )
+                        .await,
+                    )
+                }
             }))
         }
     });
@@ -72,10 +256,10 @@ async fn serve(state: Arc<ComputeNode>) {
 }
 
 /// Launch a separate Hyper HTTP API server thread and return its `JoinHandle`.
-pub fn launch_http_server(state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
+pub fn launch_http_server(port: u16, state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
     let state = Arc::clone(state);
 
     Ok(thread::Builder::new()
         .name("http-endpoint".into())
-        .spawn(move || serve(state))?)
+        .spawn(move || serve(port, state))?)
 }

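For illustration, a hypothetical control-plane-side client of the blocking /configure endpoint added above. The port, the spec file path, and the use of the `reqwest` crate are assumptions; only the request and response shapes (a JSON body with a top-level "spec" field, /status returning the serialized compute state) come from the diff.

    use std::fs;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        // The spec JSON itself is produced elsewhere; file name is hypothetical.
        let spec = fs::read_to_string("compute-spec.json")?;
        let body = format!("{{\"spec\": {}}}", spec); // ConfigurationRequest wraps the spec

        let client = reqwest::blocking::Client::new();
        // Blocks until the compute reaches Running (or returns an error status).
        let resp = client
            .post("http://127.0.0.1:3080/configure")
            .header("Content-Type", "application/json")
            .body(body)
            .send()?;
        println!("configure: {} {}", resp.status(), resp.text()?);

        // Current state can also be polled at any time.
        let status = client.get("http://127.0.0.1:3080/status").send()?.text()?;
        println!("status: {status}");
        Ok(())
    }
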
@@ -10,12 +10,12 @@ paths:
|
|||||||
/status:
|
/status:
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- "info"
|
- Info
|
||||||
summary: Get compute node internal status
|
summary: Get compute node internal status.
|
||||||
description: ""
|
description: ""
|
||||||
operationId: getComputeStatus
|
operationId: getComputeStatus
|
||||||
responses:
|
responses:
|
||||||
"200":
|
200:
|
||||||
description: ComputeState
|
description: ComputeState
|
||||||
content:
|
content:
|
||||||
application/json:
|
application/json:
|
||||||
@@ -25,35 +25,121 @@ paths:
|
|||||||
/metrics.json:
|
/metrics.json:
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- "info"
|
- Info
|
||||||
summary: Get compute node startup metrics in JSON format
|
summary: Get compute node startup metrics in JSON format.
|
||||||
description: ""
|
description: ""
|
||||||
operationId: getComputeMetricsJSON
|
operationId: getComputeMetricsJSON
|
||||||
responses:
|
responses:
|
||||||
"200":
|
200:
|
||||||
description: ComputeMetrics
|
description: ComputeMetrics
|
||||||
content:
|
content:
|
||||||
application/json:
|
application/json:
|
||||||
schema:
|
schema:
|
||||||
$ref: "#/components/schemas/ComputeMetrics"
|
$ref: "#/components/schemas/ComputeMetrics"
|
||||||
|
|
||||||
|
/insights:
|
||||||
|
get:
|
||||||
|
tags:
|
||||||
|
- Info
|
||||||
|
summary: Get current compute insights in JSON format.
|
||||||
|
description: |
|
||||||
|
Note, that this doesn't include any historical data.
|
||||||
|
operationId: getComputeInsights
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Compute insights
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/ComputeInsights"
|
||||||
|
|
||||||
|
/info:
|
||||||
|
get:
|
||||||
|
tags:
|
||||||
|
- Info
|
||||||
|
summary: Get info about the compute pod / VM.
|
||||||
|
description: ""
|
||||||
|
operationId: getInfo
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Info
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/Info"
|
||||||
|
|
||||||
/check_writability:
|
/check_writability:
|
||||||
post:
|
post:
|
||||||
tags:
|
tags:
|
||||||
- "check"
|
- Check
|
||||||
summary: Check that we can write new data on this compute
|
summary: Check that we can write new data on this compute.
|
||||||
description: ""
|
description: ""
|
||||||
operationId: checkComputeWritability
|
operationId: checkComputeWritability
|
||||||
responses:
|
responses:
|
||||||
"200":
|
200:
|
||||||
description: Check result
|
description: Check result
|
||||||
content:
|
content:
|
||||||
text/plain:
|
text/plain:
|
||||||
schema:
|
schema:
|
||||||
type: string
|
type: string
|
||||||
description: Error text or 'true' if check passed
|
description: Error text or 'true' if check passed.
|
||||||
example: "true"
|
example: "true"
|
||||||
|
|
||||||
|
/configure:
|
||||||
|
post:
|
||||||
|
tags:
|
||||||
|
- Configure
|
||||||
|
summary: Perform compute node configuration.
|
||||||
|
description: |
|
||||||
|
This is a blocking API endpoint, i.e. it blocks waiting until
|
||||||
|
compute is finished configuration and is in `Running` state.
|
||||||
|
Optional non-blocking mode could be added later.
|
||||||
|
operationId: configureCompute
|
||||||
|
requestBody:
|
||||||
|
description: Configuration request.
|
||||||
|
required: true
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- spec
|
||||||
|
properties:
|
||||||
|
spec:
|
||||||
|
# XXX: I don't want to explain current spec in the OpenAPI format,
|
||||||
|
# as it could be changed really soon. Consider doing it later.
|
||||||
|
type: object
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Compute configuration finished.
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/ComputeState"
|
||||||
|
400:
|
||||||
|
description: Provided spec is invalid.
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/GenericError"
|
||||||
|
412:
|
||||||
|
description: |
|
||||||
|
It's not possible to do live-configuration of the compute.
|
||||||
|
It's either in the wrong state, or compute doesn't use pull
|
||||||
|
mode of configuration.
|
||||||
|
content:
|
+              application/json:
+                schema:
+                  $ref: "#/components/schemas/GenericError"
+        500:
+          description: |
+            Compute configuration request was processed, but error
+            occurred. Compute will likely shutdown soon.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/GenericError"
+
 components:
   securitySchemes:
     JWT:
@@ -64,13 +150,16 @@ components:
   schemas:
     ComputeMetrics:
       type: object
-      description: Compute startup metrics
+      description: Compute startup metrics.
       required:
+        - wait_for_spec_ms
         - sync_safekeepers_ms
        - basebackup_ms
        - config_ms
        - total_startup_ms
      properties:
+        wait_for_spec_ms:
+          type: integer
        sync_safekeepers_ms:
          type: integer
        basebackup_ms:
@@ -80,28 +169,80 @@ components:
        total_startup_ms:
          type: integer
+
+    Info:
+      type: object
+      description: Information about VM/Pod.
+      required:
+        - num_cpus
+      properties:
+        num_cpus:
+          type: integer
+
    ComputeState:
      type: object
      required:
+        - start_time
        - status
-        - last_active
      properties:
+        start_time:
+          type: string
+          description: |
+            Time when compute was started. If initially compute was started in the `empty`
+            state and then provided with valid spec, `start_time` will be reset to the
+            moment, when spec was received.
+          example: "2022-10-12T07:20:50.52Z"
        status:
          $ref: '#/components/schemas/ComputeStatus'
        last_active:
          type: string
-          description: The last detected compute activity timestamp in UTC and RFC3339 format
+          description: |
+            The last detected compute activity timestamp in UTC and RFC3339 format.
+            It could be empty if compute was never used by user since start.
          example: "2022-10-12T07:20:50.52Z"
        error:
          type: string
-          description: Text of the error during compute startup, if any
+          description: Text of the error during compute startup or reconfiguration, if any.
+          example: ""
+        tenant:
+          type: string
+          description: Identifier of the current tenant served by compute node, if any.
+          example: c9269c359e9a199fad1ea0981246a78f
+        timeline:
+          type: string
+          description: Identifier of the current timeline served by compute node, if any.
+          example: ece7de74d4b8cbe5433a68ce4d1b97b4
+
+    ComputeInsights:
+      type: object
+      properties:
+        pg_stat_statements:
+          description: Contains raw output from pg_stat_statements in JSON format.
+          type: array
+          items:
+            type: object
+
    ComputeStatus:
      type: string
      enum:
+        - empty
        - init
        - failed
        - running
+        - configuration_pending
+        - configuration
+      example: running
+
+    #
+    # Errors
+    #
+
+    GenericError:
+      type: object
+      required:
+        - error
+      properties:
+        error:
+          type: string

 security:
   - JWT: []
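For orientation, a client of this HTTP API could model the ComputeState object above roughly as below. This is a minimal sketch, not compute_ctl's actual types: the struct and enum names simply mirror the schema, serde, serde_json, chrono (with its serde feature) and anyhow are assumed dependencies, and the sample JSON body is made up.

    use chrono::{DateTime, Utc};
    use serde::Deserialize;

    // Mirrors the ComputeStatus enum in the schema above.
    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "snake_case")]
    enum ComputeStatus {
        Empty,
        Init,
        Failed,
        Running,
        ConfigurationPending,
        Configuration,
    }

    // Mirrors the ComputeState object; optional fields may be absent.
    #[derive(Debug, Deserialize)]
    struct ComputeState {
        start_time: DateTime<Utc>,
        status: ComputeStatus,
        last_active: Option<DateTime<Utc>>,
        error: Option<String>,
        tenant: Option<String>,
        timeline: Option<String>,
    }

    fn main() -> anyhow::Result<()> {
        // Hypothetical /status response body.
        let body = r#"{"start_time": "2022-10-12T07:20:50.52Z", "status": "running"}"#;
        let state: ComputeState = serde_json::from_str(body)?;
        println!("compute status: {:?}", state.status);
        Ok(())
    }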
@@ -1,50 +0,0 @@
-use std::path::Path;
-use std::process;
-use std::thread;
-use std::time::Duration;
-use tracing::{info, warn};
-
-use anyhow::{Context, Result};
-
-const VM_INFORMANT_PATH: &str = "/bin/vm-informant";
-const RESTART_INFORMANT_AFTER_MILLIS: u64 = 5000;
-
-/// Launch a thread to start the VM informant if it's present (and restart, on failure)
-pub fn spawn_vm_informant_if_present() -> Result<Option<thread::JoinHandle<()>>> {
-    let exists = Path::new(VM_INFORMANT_PATH)
-        .try_exists()
-        .context("could not check if path exists")?;
-
-    if !exists {
-        return Ok(None);
-    }
-
-    Ok(Some(
-        thread::Builder::new()
-            .name("run-vm-informant".into())
-            .spawn(move || run_informant())?,
-    ))
-}
-
-fn run_informant() -> ! {
-    let restart_wait = Duration::from_millis(RESTART_INFORMANT_AFTER_MILLIS);
-
-    info!("starting VM informant");
-
-    loop {
-        let mut cmd = process::Command::new(VM_INFORMANT_PATH);
-        // Block on subprocess:
-        let result = cmd.status();
-
-        match result {
-            Err(e) => warn!("failed to run VM informant at {VM_INFORMANT_PATH:?}: {e}"),
-            Ok(status) if !status.success() => {
-                warn!("{VM_INFORMANT_PATH} exited with code {status:?}, retrying")
-            }
-            Ok(_) => info!("{VM_INFORMANT_PATH} ended gracefully (unexpectedly). Retrying"),
-        }
-
-        // Wait before retrying
-        thread::sleep(restart_wait);
-    }
-}
@@ -4,12 +4,13 @@
 //!
 pub mod checker;
 pub mod config;
+pub mod configurator;
 pub mod http;
 #[macro_use]
 pub mod logger;
 pub mod compute;
-pub mod informant;
 pub mod monitor;
 pub mod params;
 pub mod pg_helpers;
 pub mod spec;
+pub mod extensions;
@@ -1,21 +1,39 @@
-use anyhow::Result;
+use tracing_opentelemetry::OpenTelemetryLayer;
 use tracing_subscriber::layer::SubscriberExt;
 use tracing_subscriber::prelude::*;

-/// Initialize `env_logger` using either `default_level` or
+/// Initialize logging to stderr, and OpenTelemetry tracing and exporter.
+///
+/// Logging is configured using either `default_log_level` or
 /// `RUST_LOG` environment variable as default log level.
-pub fn init_logger(default_level: &str) -> Result<()> {
+///
+/// OpenTelemetry is configured with OTLP/HTTP exporter. It picks up
+/// configuration from environment variables. For example, to change the destination,
+/// set `OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4318`. See
+/// `tracing-utils` package description.
+///
+pub fn init_tracing_and_logging(default_log_level: &str) -> anyhow::Result<()> {
+    // Initialize Logging
     let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
-        .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_level));
+        .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));

     let fmt_layer = tracing_subscriber::fmt::layer()
         .with_target(false)
         .with_writer(std::io::stderr);

+    // Initialize OpenTelemetry
+    let otlp_layer =
+        tracing_utils::init_tracing_without_runtime("compute_ctl").map(OpenTelemetryLayer::new);
+
+    // Put it all together
     tracing_subscriber::registry()
         .with(env_filter)
+        .with(otlp_layer)
         .with(fmt_layer)
         .init();
+    tracing::info!("logging and tracing started");
+
+    utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();
+
     Ok(())
 }
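A minimal sketch of how a binary might call the new initialization function above. The crate/module path and the endpoint value are assumptions for illustration, not taken from this diff:

    fn main() -> anyhow::Result<()> {
        // Respects RUST_LOG if set; otherwise falls back to the given default level.
        // The OTLP exporter reads its own configuration from the environment,
        // e.g. OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 (illustrative value).
        compute_tools::logger::init_tracing_and_logging("info")?;

        tracing::info!("service started");
        Ok(())
    }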
@@ -46,7 +46,7 @@ fn watch_compute_activity(compute: &ComputeNode) {
         AND usename != 'cloud_admin';", // XXX: find a better way to filter other monitors?
         &[],
     );
-    let mut last_active = compute.state.read().unwrap().last_active;
+    let mut last_active = compute.state.lock().unwrap().last_active;

     if let Ok(backs) = backends {
         let mut idle_backs: Vec<DateTime<Utc>> = vec![];
@@ -74,7 +74,7 @@ fn watch_compute_activity(compute: &ComputeNode) {
                 // Found non-idle backend, so the last activity is NOW.
                 // Save it and exit the for loop. Also clear the idle backend
                 // `state_change` timestamps array as it doesn't matter now.
-                last_active = Utc::now();
+                last_active = Some(Utc::now());
                 idle_backs.clear();
                 break;
             }
@@ -82,15 +82,16 @@ fn watch_compute_activity(compute: &ComputeNode) {

             // Get idle backend `state_change` with the max timestamp.
             if let Some(last) = idle_backs.iter().max() {
-                last_active = *last;
+                last_active = Some(*last);
             }
         }

         // Update the last activity in the shared state if we got a more recent one.
-        let mut state = compute.state.write().unwrap();
+        let mut state = compute.state.lock().unwrap();
+        // NB: `Some(<DateTime>)` is always greater than `None`.
         if last_active > state.last_active {
             state.last_active = last_active;
-            debug!("set the last compute activity time to: {}", last_active);
+            debug!("set the last compute activity time to: {:?}", last_active);
         }
     }
     Err(e) => {
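The `NB` comment above leans on `Option`'s derived ordering, where `None` sorts before any `Some(_)`. A quick standalone illustration (chrono assumed as a dependency):

    use chrono::{DateTime, Utc};

    fn main() {
        let never_active: Option<DateTime<Utc>> = None;
        let active_now: Option<DateTime<Utc>> = Some(Utc::now());

        // None < Some(_) for any value, so a real timestamp always replaces "never active".
        assert!(active_now > never_active);

        // And between two Some(_) values, the usual DateTime ordering applies.
        let earlier = Some(Utc::now() - chrono::Duration::seconds(60));
        assert!(active_now > earlier);
        println!("ordering checks passed");
    }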
@@ -10,49 +10,34 @@ use std::time::{Duration, Instant};
 use anyhow::{bail, Result};
 use notify::{RecursiveMode, Watcher};
 use postgres::{Client, Transaction};
-use serde::Deserialize;
 use tracing::{debug, instrument};

+use compute_api::spec::{Database, GenericOption, GenericOptions, PgIdent, Role};
+
 const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds

-/// Rust representation of Postgres role info with only those fields
-/// that matter for us.
-#[derive(Clone, Deserialize)]
-pub struct Role {
-    pub name: PgIdent,
-    pub encrypted_password: Option<String>,
-    pub options: GenericOptions,
+/// Escape a string for including it in a SQL literal
+fn escape_literal(s: &str) -> String {
+    s.replace('\'', "''").replace('\\', "\\\\")
 }

-/// Rust representation of Postgres database info with only those fields
-/// that matter for us.
-#[derive(Clone, Deserialize)]
-pub struct Database {
-    pub name: PgIdent,
-    pub owner: PgIdent,
-    pub options: GenericOptions,
+/// Escape a string so that it can be used in postgresql.conf.
+/// Same as escape_literal, currently.
+pub fn escape_conf_value(s: &str) -> String {
+    s.replace('\'', "''").replace('\\', "\\\\")
 }

-/// Common type representing both SQL statement params with or without value,
-/// like `LOGIN` or `OWNER username` in the `CREATE/ALTER ROLE`, and config
-/// options like `wal_level = logical`.
-#[derive(Clone, Deserialize)]
-pub struct GenericOption {
-    pub name: String,
-    pub value: Option<String>,
-    pub vartype: String,
+trait GenericOptionExt {
+    fn to_pg_option(&self) -> String;
+    fn to_pg_setting(&self) -> String;
 }

-/// Optional collection of `GenericOption`'s. Type alias allows us to
-/// declare a `trait` on it.
-pub type GenericOptions = Option<Vec<GenericOption>>;
-
-impl GenericOption {
+impl GenericOptionExt for GenericOption {
     /// Represent `GenericOption` as SQL statement parameter.
-    pub fn to_pg_option(&self) -> String {
+    fn to_pg_option(&self) -> String {
         if let Some(val) = &self.value {
             match self.vartype.as_ref() {
-                "string" => format!("{} '{}'", self.name, val),
+                "string" => format!("{} '{}'", self.name, escape_literal(val)),
                 _ => format!("{} {}", self.name, val),
             }
         } else {
@@ -61,18 +46,11 @@ impl GenericOption {
     }

     /// Represent `GenericOption` as configuration option.
-    pub fn to_pg_setting(&self) -> String {
+    fn to_pg_setting(&self) -> String {
         if let Some(val) = &self.value {
-            let name = match self.name.as_str() {
-                "safekeepers" => "neon.safekeepers",
-                "wal_acceptor_reconnect" => "neon.safekeeper_reconnect_timeout",
-                "wal_acceptor_connection_timeout" => "neon.safekeeper_connection_timeout",
-                it => it,
-            };
-
             match self.vartype.as_ref() {
-                "string" => format!("{} = '{}'", name, val),
-                _ => format!("{} = {}", name, val),
+                "string" => format!("{} = '{}'", self.name, escape_conf_value(val)),
+                _ => format!("{} = {}", self.name, val),
             }
         } else {
             self.name.to_owned()
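A worked example of the escaping introduced above, tied to the `test.escaping` value used in the test later in this diff. This is a standalone sketch; the function body is copied from the diff:

    /// Same body as the escape_literal/escape_conf_value shown above.
    fn escape_conf_value(s: &str) -> String {
        s.replace('\'', "''").replace('\\', "\\\\")
    }

    fn main() {
        let raw = r#"here's a backslash \ and a quote ' and a double-quote " hooray"#;
        let escaped = escape_conf_value(raw);
        // Single quotes are doubled, then backslashes are doubled:
        assert_eq!(
            escaped,
            r#"here''s a backslash \\ and a quote '' and a double-quote " hooray"#
        );
        // Which is exactly what ends up in postgresql.conf as
        //   test.escaping = 'here''s a backslash \\ and a quote '' and a double-quote " hooray'
        println!("test.escaping = '{escaped}'");
    }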
@@ -107,6 +85,7 @@ impl PgOptionsSerialize for GenericOptions {
                 .map(|op| op.to_pg_setting())
                 .collect::<Vec<String>>()
                 .join("\n")
+                + "\n" // newline after last setting
         } else {
             "".to_string()
         }
@@ -115,6 +94,7 @@ impl PgOptionsSerialize for GenericOptions {

 pub trait GenericOptionsSearch {
     fn find(&self, name: &str) -> Option<String>;
+    fn find_ref(&self, name: &str) -> Option<&GenericOption>;
 }

 impl GenericOptionsSearch for GenericOptions {
@@ -124,16 +104,25 @@ impl GenericOptionsSearch for GenericOptions {
         let op = ops.iter().find(|s| s.name == name)?;
         op.value.clone()
     }
+
+    /// Lookup option by name, returning ref
+    fn find_ref(&self, name: &str) -> Option<&GenericOption> {
+        let ops = self.as_ref()?;
+        ops.iter().find(|s| s.name == name)
+    }
 }

-impl Role {
+pub trait RoleExt {
+    fn to_pg_options(&self) -> String;
+}
+
+impl RoleExt for Role {
     /// Serialize a list of role parameters into a Postgres-acceptable
     /// string of arguments.
-    pub fn to_pg_options(&self) -> String {
+    fn to_pg_options(&self) -> String {
         // XXX: consider putting LOGIN as a default option somewhere higher, e.g. in control-plane.
-        // For now, we do not use generic `options` for roles. Once used, add
-        // `self.options.as_pg_options()` somewhere here.
-        let mut params: String = "LOGIN".to_string();
+        let mut params: String = self.options.as_pg_options();
+        params.push_str(" LOGIN");

         if let Some(pass) = &self.encrypted_password {
             // Some time ago we supported only md5 and treated all encrypted_password as md5.
@@ -154,21 +143,17 @@ impl Role {
         }
     }

-impl Database {
-    pub fn new(name: PgIdent, owner: PgIdent) -> Self {
-        Self {
-            name,
-            owner,
-            options: None,
-        }
-    }
-
+pub trait DatabaseExt {
+    fn to_pg_options(&self) -> String;
+}
+
+impl DatabaseExt for Database {
     /// Serialize a list of database parameters into a Postgres-acceptable
     /// string of arguments.
     /// NB: `TEMPLATE` is actually also an identifier, but so far we only need
     /// to use `template0` and `template1`, so it is not a problem. Yet in the future
     /// it may require a proper quoting too.
-    pub fn to_pg_options(&self) -> String {
+    fn to_pg_options(&self) -> String {
         let mut params: String = self.options.as_pg_options();
         write!(params, " OWNER {}", &self.owner.pg_quote())
             .expect("String is documented to not to error during write operations");
@@ -177,10 +162,6 @@ impl Database {
     }
 }

-/// String type alias representing Postgres identifier and
-/// intended to be used for DB / role names.
-pub type PgIdent = String;
-
 /// Generic trait used to provide quoting / encoding for strings used in the
 /// Postgres SQL queries and DATABASE_URL.
 pub trait Escaping {
@@ -221,7 +202,11 @@ pub fn get_existing_dbs(client: &mut Client) -> Result<Vec<Database>> {
             &[],
         )?
         .iter()
-        .map(|row| Database::new(row.get("datname"), row.get("owner")))
+        .map(|row| Database {
+            name: row.get("datname"),
+            owner: row.get("owner"),
+            options: None,
+        })
         .collect();

     Ok(postgres_dbs)
@@ -1,52 +1,121 @@
+use std::fs::File;
 use std::path::Path;
 use std::str::FromStr;

-use anyhow::Result;
+use anyhow::{anyhow, bail, Result};
 use postgres::config::Config;
 use postgres::{Client, NoTls};
-use serde::Deserialize;
+use reqwest::StatusCode;
-use tracing::{info, info_span, instrument, span_enabled, warn, Level};
+use tracing::{error, info, info_span, instrument, span_enabled, warn, Level};

-use crate::compute::ComputeNode;
 use crate::config;
 use crate::params::PG_HBA_ALL_MD5;
 use crate::pg_helpers::*;

-/// Cluster spec or configuration represented as an optional number of
-/// delta operations + final cluster state description.
-#[derive(Clone, Deserialize)]
-pub struct ComputeSpec {
-    pub format_version: f32,
-    pub timestamp: String,
-    pub operation_uuid: Option<String>,
-    /// Expected cluster state at the end of transition process.
-    pub cluster: Cluster,
-    pub delta_operations: Option<Vec<DeltaOp>>,
+use compute_api::responses::{ControlPlaneComputeStatus, ControlPlaneSpecResponse};
+use compute_api::spec::{ComputeSpec, Database, PgIdent, Role};
+
+// Do control plane request and return response if any. In case of error it
+// returns a bool flag indicating whether it makes sense to retry the request
+// and a string with error message.
+fn do_control_plane_request(
+    uri: &str,
+    jwt: &str,
+) -> Result<ControlPlaneSpecResponse, (bool, String)> {
+    let resp = reqwest::blocking::Client::new()
+        .get(uri)
+        .header("Authorization", jwt)
+        .send()
+        .map_err(|e| {
+            (
+                true,
+                format!("could not perform spec request to control plane: {}", e),
+            )
+        })?;
+
+    match resp.status() {
+        StatusCode::OK => match resp.json::<ControlPlaneSpecResponse>() {
+            Ok(spec_resp) => Ok(spec_resp),
+            Err(e) => Err((
+                true,
+                format!("could not deserialize control plane response: {}", e),
+            )),
+        },
+        StatusCode::SERVICE_UNAVAILABLE => {
+            Err((true, "control plane is temporarily unavailable".to_string()))
+        }
+        StatusCode::BAD_GATEWAY => {
+            // We have a problem with intermittent 502 errors now
+            // https://github.com/neondatabase/cloud/issues/2353
+            // It's fine to retry GET request in this case.
+            Err((true, "control plane request failed with 502".to_string()))
+        }
+        // Another code, likely 500 or 404, means that compute is unknown to the control plane
+        // or some internal failure happened. Doesn't make much sense to retry in this case.
+        _ => Err((
+            false,
+            format!(
+                "unexpected control plane response status code: {}",
+                resp.status()
+            ),
+        )),
+    }
 }

-/// Cluster state seen from the perspective of the external tools
-/// like Rails web console.
-#[derive(Clone, Deserialize)]
-pub struct Cluster {
-    pub cluster_id: String,
-    pub name: String,
-    pub state: Option<String>,
-    pub roles: Vec<Role>,
-    pub databases: Vec<Database>,
-    pub settings: GenericOptions,
-}
-
-/// Single cluster state changing operation that could not be represented as
-/// a static `Cluster` structure. For example:
-/// - DROP DATABASE
-/// - DROP ROLE
-/// - ALTER ROLE name RENAME TO new_name
-/// - ALTER DATABASE name RENAME TO new_name
-#[derive(Clone, Deserialize)]
-pub struct DeltaOp {
-    pub action: String,
-    pub name: PgIdent,
-    pub new_name: Option<PgIdent>,
+/// Request spec from the control-plane by compute_id. If `NEON_CONTROL_PLANE_TOKEN`
+/// env variable is set, it will be used for authorization.
+pub fn get_spec_from_control_plane(
+    base_uri: &str,
+    compute_id: &str,
+) -> Result<Option<ComputeSpec>> {
+    let cp_uri = format!("{base_uri}/management/api/v2/computes/{compute_id}/spec");
+    let jwt: String = match std::env::var("NEON_CONTROL_PLANE_TOKEN") {
+        Ok(v) => v,
+        Err(_) => "".to_string(),
+    };
+    let mut attempt = 1;
+    let mut spec: Result<Option<ComputeSpec>> = Ok(None);
+
+    info!("getting spec from control plane: {}", cp_uri);
+
+    // Do 3 attempts to get spec from the control plane using the following logic:
+    // - network error -> then retry
+    // - compute id is unknown or any other error -> bail out
+    // - no spec for compute yet (Empty state) -> return Ok(None)
+    // - got spec -> return Ok(Some(spec))
+    while attempt < 4 {
+        spec = match do_control_plane_request(&cp_uri, &jwt) {
+            Ok(spec_resp) => match spec_resp.status {
+                ControlPlaneComputeStatus::Empty => Ok(None),
+                ControlPlaneComputeStatus::Attached => {
+                    if let Some(spec) = spec_resp.spec {
+                        Ok(Some(spec))
+                    } else {
+                        bail!("compute is attached, but spec is empty")
+                    }
+                }
+            },
+            Err((retry, msg)) => {
+                if retry {
+                    Err(anyhow!(msg))
+                } else {
+                    bail!(msg);
+                }
+            }
+        };
+
+        if let Err(e) = &spec {
+            error!("attempt {} to get spec failed with: {}", attempt, e);
+        } else {
+            return spec;
+        }
+
+        attempt += 1;
+        std::thread::sleep(std::time::Duration::from_millis(100));
+    }
+
+    // All attempts failed, return error.
+    spec
 }

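A hedged sketch of how a caller might drive the retry helper above; the base URI and compute id are placeholders, and error handling is reduced to propagation:

    fn fetch_spec() -> anyhow::Result<()> {
        // NEON_CONTROL_PLANE_TOKEN, if present in the environment, is sent as the
        // Authorization header by get_spec_from_control_plane.
        let base_uri = "http://127.0.0.1:9095"; // placeholder control plane address
        let compute_id = "compute-example-1234"; // placeholder compute id

        match get_spec_from_control_plane(base_uri, compute_id)? {
            Some(_spec) => println!("compute is attached, spec received"),
            None => println!("compute is in the Empty state, no spec yet"),
        }
        Ok(())
    }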
 /// It takes cluster specification and does the following:
@@ -77,6 +146,21 @@ pub fn update_pg_hba(pgdata_path: &Path) -> Result<()> {
     Ok(())
 }

+/// Create a standby.signal file
+pub fn add_standby_signal(pgdata_path: &Path) -> Result<()> {
+    // XXX: consider making it a part of spec.json
+    info!("adding standby.signal");
+    let signalfile = pgdata_path.join("standby.signal");
+
+    if !signalfile.exists() {
+        info!("created standby.signal");
+        File::create(signalfile)?;
+    } else {
+        info!("reused pre-existing standby.signal");
+    }
+    Ok(())
+}
+
 /// Given a cluster spec json and open transaction it handles roles creation,
 /// deletion and update.
 #[instrument(skip_all)]
@@ -221,8 +305,8 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {

 /// Reassign all dependent objects and delete requested roles.
 #[instrument(skip_all)]
-pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<()> {
-    if let Some(ops) = &node.spec.delta_operations {
+pub fn handle_role_deletions(spec: &ComputeSpec, connstr: &str, client: &mut Client) -> Result<()> {
+    if let Some(ops) = &spec.delta_operations {
         // First, reassign all dependent objects to db owners.
         info!("reassigning dependent objects of to-be-deleted roles");

@@ -239,7 +323,7 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
             // Check that role is still present in Postgres, as this could be a
             // restart with the same spec after role deletion.
             if op.action == "delete_role" && existing_roles.iter().any(|r| r.name == op.name) {
-                reassign_owned_objects(node, &op.name)?;
+                reassign_owned_objects(spec, connstr, &op.name)?;
             }
         }

@@ -263,10 +347,10 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
 }

 // Reassign all owned objects in all databases to the owner of the database.
-fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()> {
-    for db in &node.spec.cluster.databases {
+fn reassign_owned_objects(spec: &ComputeSpec, connstr: &str, role_name: &PgIdent) -> Result<()> {
+    for db in &spec.cluster.databases {
         if db.owner != *role_name {
-            let mut conf = Config::from_str(node.connstr.as_str())?;
+            let mut conf = Config::from_str(connstr)?;
             conf.dbname(&db.name);

             let mut client = conf.connect(NoTls)?;
@@ -384,13 +468,13 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
                     name.pg_quote(),
                     db.owner.pg_quote()
                 );
-                let _ = info_span!("executing", query).entered();
+                let _guard = info_span!("executing", query).entered();
                 client.execute(query.as_str(), &[])?;
             }
             DatabaseAction::Create => {
                 let mut query: String = format!("CREATE DATABASE {} ", name.pg_quote());
                 query.push_str(&db.to_pg_options());
-                let _ = info_span!("executing", query).entered();
+                let _guard = info_span!("executing", query).entered();
                 client.execute(query.as_str(), &[])?;
             }
         };
@@ -411,9 +495,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
 /// Grant CREATE ON DATABASE to the database owner and do some other alters and grants
 /// to allow users creating trusted extensions and re-creating `public` schema, for example.
 #[instrument(skip_all)]
-pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
-    let spec = &node.spec;
-
+pub fn handle_grants(spec: &ComputeSpec, connstr: &str, client: &mut Client) -> Result<()> {
     info!("cluster spec grants:");

     // We now have a separate `web_access` role to connect to the database
@@ -445,8 +527,8 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
     // Do some per-database access adjustments. We'd better do this at db creation time,
     // but CREATE DATABASE isn't transactional. So we cannot create db + do some grants
     // atomically.
-    for db in &node.spec.cluster.databases {
-        let mut conf = Config::from_str(node.connstr.as_str())?;
+    for db in &spec.cluster.databases {
+        let mut conf = Config::from_str(connstr)?;
         conf.dbname(&db.name);

         let mut db_client = conf.connect(NoTls)?;
@@ -512,3 +594,18 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {

     Ok(())
 }
+
+/// Create required system extensions
+#[instrument(skip_all)]
+pub fn handle_extensions(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
+    if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
+        if libs.contains("pg_stat_statements") {
+            // Create extension only if this compute really needs it
+            let query = "CREATE EXTENSION IF NOT EXISTS pg_stat_statements";
+            info!("creating system extensions with query: {}", query);
+            client.simple_query(query)?;
+        }
+    }
+
+    Ok(())
+}
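The `find` used by `handle_extensions` above is the `GenericOptionsSearch` helper extended earlier in this diff. A small self-contained sketch of the lookup it performs; the types are re-declared locally in simplified form, so their exact shapes here are assumptions:

    // Simplified stand-ins for GenericOption / GenericOptions from compute_api.
    struct GenericOption {
        name: String,
        value: Option<String>,
    }
    type GenericOptions = Option<Vec<GenericOption>>;

    fn find(options: &GenericOptions, name: &str) -> Option<String> {
        let ops = options.as_ref()?;
        ops.iter().find(|o| o.name == name)?.value.clone()
    }

    fn main() {
        let settings: GenericOptions = Some(vec![GenericOption {
            name: "shared_preload_libraries".to_string(),
            value: Some("neon,pg_stat_statements".to_string()),
        }]);

        // This is the check handle_extensions performs before creating the extension.
        if let Some(libs) = find(&settings, "shared_preload_libraries") {
            if libs.contains("pg_stat_statements") {
                println!("would run: CREATE EXTENSION IF NOT EXISTS pg_stat_statements");
            }
        }
    }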
@@ -1,14 +1,13 @@
 #[cfg(test)]
 mod pg_helpers_tests {

     use std::fs::File;

+    use compute_api::spec::{ComputeSpec, GenericOption, GenericOptions, PgIdent};
     use compute_tools::pg_helpers::*;
-    use compute_tools::spec::ComputeSpec;

     #[test]
     fn params_serialize() {
-        let file = File::open("tests/cluster_spec.json").unwrap();
+        let file = File::open("../libs/compute_api/tests/cluster_spec.json").unwrap();
         let spec: ComputeSpec = serde_json::from_reader(file).unwrap();

         assert_eq!(
@@ -17,18 +16,41 @@ mod pg_helpers_tests {
         );
         assert_eq!(
             spec.cluster.roles.first().unwrap().to_pg_options(),
-            "LOGIN PASSWORD 'md56b1d16b78004bbd51fa06af9eda75972'"
+            " LOGIN PASSWORD 'md56b1d16b78004bbd51fa06af9eda75972'"
         );
     }

     #[test]
     fn settings_serialize() {
-        let file = File::open("tests/cluster_spec.json").unwrap();
+        let file = File::open("../libs/compute_api/tests/cluster_spec.json").unwrap();
         let spec: ComputeSpec = serde_json::from_reader(file).unwrap();

         assert_eq!(
             spec.cluster.settings.as_pg_settings(),
-            "fsync = off\nwal_level = replica\nhot_standby = on\nneon.safekeepers = '127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501'\nwal_log_hints = on\nlog_connections = on\nshared_buffers = 32768\nport = 55432\nmax_connections = 100\nmax_wal_senders = 10\nlisten_addresses = '0.0.0.0'\nwal_sender_timeout = 0\npassword_encryption = md5\nmaintenance_work_mem = 65536\nmax_parallel_workers = 8\nmax_worker_processes = 8\nneon.tenant_id = 'b0554b632bd4d547a63b86c3630317e8'\nmax_replication_slots = 10\nneon.timeline_id = '2414a61ffc94e428f14b5758fe308e13'\nshared_preload_libraries = 'neon'\nsynchronous_standby_names = 'walproposer'\nneon.pageserver_connstring = 'host=127.0.0.1 port=6400'"
+            r#"fsync = off
+wal_level = replica
+hot_standby = on
+neon.safekeepers = '127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501'
+wal_log_hints = on
+log_connections = on
+shared_buffers = 32768
+port = 55432
+max_connections = 100
+max_wal_senders = 10
+listen_addresses = '0.0.0.0'
+wal_sender_timeout = 0
+password_encryption = md5
+maintenance_work_mem = 65536
+max_parallel_workers = 8
+max_worker_processes = 8
+neon.tenant_id = 'b0554b632bd4d547a63b86c3630317e8'
+max_replication_slots = 10
+neon.timeline_id = '2414a61ffc94e428f14b5758fe308e13'
+shared_preload_libraries = 'neon'
+synchronous_standby_names = 'walproposer'
+neon.pageserver_connstring = 'host=127.0.0.1 port=6400'
+test.escaping = 'here''s a backslash \\ and a quote '' and a double-quote " hooray'
+"#
         );
     }
 }
@@ -15,6 +15,7 @@ postgres.workspace = true
 regex.workspace = true
 reqwest = { workspace = true, features = ["blocking", "json"] }
 serde.workspace = true
+serde_json.workspace = true
 serde_with.workspace = true
 tar.workspace = true
 thiserror.workspace = true
@@ -23,9 +24,11 @@ url.workspace = true
 # Note: Do not directly depend on pageserver or safekeeper; use pageserver_api or safekeeper_api
 # instead, so that recompile times are better.
 pageserver_api.workspace = true
+postgres_backend.workspace = true
 safekeeper_api.workspace = true
 postgres_connection.workspace = true
 storage_broker.workspace = true
 utils.workspace = true

+compute_api.workspace = true
 workspace_hack.workspace = true
@@ -2,7 +2,8 @@
 [pageserver]
 listen_pg_addr = '127.0.0.1:64000'
 listen_http_addr = '127.0.0.1:9898'
-auth_type = 'Trust'
+pg_auth_type = 'Trust'
+http_auth_type = 'Trust'

 [[safekeepers]]
 id = 1

@@ -3,7 +3,8 @@
 [pageserver]
 listen_pg_addr = '127.0.0.1:64000'
 listen_http_addr = '127.0.0.1:9898'
-auth_type = 'Trust'
+pg_auth_type = 'Trust'
+http_auth_type = 'Trust'

 [[safekeepers]]
 id = 1
@@ -7,7 +7,8 @@
|
|||||||
//!
|
//!
|
||||||
use anyhow::{anyhow, bail, Context, Result};
|
use anyhow::{anyhow, bail, Context, Result};
|
||||||
use clap::{value_parser, Arg, ArgAction, ArgMatches, Command};
|
use clap::{value_parser, Arg, ArgAction, ArgMatches, Command};
|
||||||
use control_plane::compute::ComputeControlPlane;
|
use compute_api::spec::ComputeMode;
|
||||||
|
use control_plane::endpoint::ComputeControlPlane;
|
||||||
use control_plane::local_env::LocalEnv;
|
use control_plane::local_env::LocalEnv;
|
||||||
use control_plane::pageserver::PageServerNode;
|
use control_plane::pageserver::PageServerNode;
|
||||||
use control_plane::safekeeper::SafekeeperNode;
|
use control_plane::safekeeper::SafekeeperNode;
|
||||||
@@ -17,6 +18,7 @@ use pageserver_api::{
|
|||||||
DEFAULT_HTTP_LISTEN_ADDR as DEFAULT_PAGESERVER_HTTP_ADDR,
|
DEFAULT_HTTP_LISTEN_ADDR as DEFAULT_PAGESERVER_HTTP_ADDR,
|
||||||
DEFAULT_PG_LISTEN_ADDR as DEFAULT_PAGESERVER_PG_ADDR,
|
DEFAULT_PG_LISTEN_ADDR as DEFAULT_PAGESERVER_PG_ADDR,
|
||||||
};
|
};
|
||||||
|
use postgres_backend::AuthType;
|
||||||
use safekeeper_api::{
|
use safekeeper_api::{
|
||||||
DEFAULT_HTTP_LISTEN_PORT as DEFAULT_SAFEKEEPER_HTTP_PORT,
|
DEFAULT_HTTP_LISTEN_PORT as DEFAULT_SAFEKEEPER_HTTP_PORT,
|
||||||
DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT,
|
DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT,
|
||||||
@@ -30,7 +32,6 @@ use utils::{
|
|||||||
auth::{Claims, Scope},
|
auth::{Claims, Scope},
|
||||||
id::{NodeId, TenantId, TenantTimelineId, TimelineId},
|
id::{NodeId, TenantId, TenantTimelineId, TimelineId},
|
||||||
lsn::Lsn,
|
lsn::Lsn,
|
||||||
postgres_backend::AuthType,
|
|
||||||
project_git_version,
|
project_git_version,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -40,7 +41,7 @@ const DEFAULT_PAGESERVER_ID: NodeId = NodeId(1);
|
|||||||
const DEFAULT_BRANCH_NAME: &str = "main";
|
const DEFAULT_BRANCH_NAME: &str = "main";
|
||||||
project_git_version!(GIT_VERSION);
|
project_git_version!(GIT_VERSION);
|
||||||
|
|
||||||
const DEFAULT_PG_VERSION: &str = "14";
|
const DEFAULT_PG_VERSION: &str = "15";
|
||||||
|
|
||||||
fn default_conf() -> String {
|
fn default_conf() -> String {
|
||||||
format!(
|
format!(
|
||||||
@@ -53,14 +54,15 @@ listen_addr = '{DEFAULT_BROKER_ADDR}'
|
|||||||
id = {DEFAULT_PAGESERVER_ID}
|
id = {DEFAULT_PAGESERVER_ID}
|
||||||
listen_pg_addr = '{DEFAULT_PAGESERVER_PG_ADDR}'
|
listen_pg_addr = '{DEFAULT_PAGESERVER_PG_ADDR}'
|
||||||
listen_http_addr = '{DEFAULT_PAGESERVER_HTTP_ADDR}'
|
listen_http_addr = '{DEFAULT_PAGESERVER_HTTP_ADDR}'
|
||||||
auth_type = '{pageserver_auth_type}'
|
pg_auth_type = '{trust_auth}'
|
||||||
|
http_auth_type = '{trust_auth}'
|
||||||
|
|
||||||
[[safekeepers]]
|
[[safekeepers]]
|
||||||
id = {DEFAULT_SAFEKEEPER_ID}
|
id = {DEFAULT_SAFEKEEPER_ID}
|
||||||
pg_port = {DEFAULT_SAFEKEEPER_PG_PORT}
|
pg_port = {DEFAULT_SAFEKEEPER_PG_PORT}
|
||||||
http_port = {DEFAULT_SAFEKEEPER_HTTP_PORT}
|
http_port = {DEFAULT_SAFEKEEPER_HTTP_PORT}
|
||||||
"#,
|
"#,
|
||||||
pageserver_auth_type = AuthType::Trust,
|
trust_auth = AuthType::Trust,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -105,8 +107,9 @@ fn main() -> Result<()> {
|
|||||||
"start" => handle_start_all(sub_args, &env),
|
"start" => handle_start_all(sub_args, &env),
|
||||||
"stop" => handle_stop_all(sub_args, &env),
|
"stop" => handle_stop_all(sub_args, &env),
|
||||||
"pageserver" => handle_pageserver(sub_args, &env),
|
"pageserver" => handle_pageserver(sub_args, &env),
|
||||||
"pg" => handle_pg(sub_args, &env),
|
|
||||||
"safekeeper" => handle_safekeeper(sub_args, &env),
|
"safekeeper" => handle_safekeeper(sub_args, &env),
|
||||||
|
"endpoint" => handle_endpoint(sub_args, &env),
|
||||||
|
"pg" => bail!("'pg' subcommand has been renamed to 'endpoint'"),
|
||||||
_ => bail!("unexpected subcommand {sub_name}"),
|
_ => bail!("unexpected subcommand {sub_name}"),
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -469,10 +472,18 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
|
|||||||
let mut cplane = ComputeControlPlane::load(env.clone())?;
|
let mut cplane = ComputeControlPlane::load(env.clone())?;
|
||||||
println!("Importing timeline into pageserver ...");
|
println!("Importing timeline into pageserver ...");
|
||||||
pageserver.timeline_import(tenant_id, timeline_id, base, pg_wal, pg_version)?;
|
pageserver.timeline_import(tenant_id, timeline_id, base, pg_wal, pg_version)?;
|
||||||
println!("Creating node for imported timeline ...");
|
|
||||||
env.register_branch_mapping(name.to_string(), tenant_id, timeline_id)?;
|
env.register_branch_mapping(name.to_string(), tenant_id, timeline_id)?;
|
||||||
|
|
||||||
cplane.new_node(tenant_id, name, timeline_id, None, None, pg_version)?;
|
println!("Creating endpoint for imported timeline ...");
|
||||||
|
cplane.new_endpoint(
|
||||||
|
name,
|
||||||
|
tenant_id,
|
||||||
|
timeline_id,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
pg_version,
|
||||||
|
ComputeMode::Primary,
|
||||||
|
)?;
|
||||||
println!("Done");
|
println!("Done");
|
||||||
}
|
}
|
||||||
Some(("branch", branch_match)) => {
|
Some(("branch", branch_match)) => {
|
||||||
@@ -520,10 +531,10 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
||||||
let (sub_name, sub_args) = match pg_match.subcommand() {
|
let (sub_name, sub_args) = match ep_match.subcommand() {
|
||||||
Some(pg_subcommand_data) => pg_subcommand_data,
|
Some(ep_subcommand_data) => ep_subcommand_data,
|
||||||
None => bail!("no pg subcommand provided"),
|
None => bail!("no endpoint subcommand provided"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut cplane = ComputeControlPlane::load(env.clone())?;
|
let mut cplane = ComputeControlPlane::load(env.clone())?;
|
||||||
@@ -545,7 +556,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
table.load_preset(comfy_table::presets::NOTHING);
|
table.load_preset(comfy_table::presets::NOTHING);
|
||||||
|
|
||||||
table.set_header([
|
table.set_header([
|
||||||
"NODE",
|
"ENDPOINT",
|
||||||
"ADDRESS",
|
"ADDRESS",
|
||||||
"TIMELINE",
|
"TIMELINE",
|
||||||
"BRANCH NAME",
|
"BRANCH NAME",
|
||||||
@@ -553,39 +564,39 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
"STATUS",
|
"STATUS",
|
||||||
]);
|
]);
|
||||||
|
|
||||||
for ((_, node_name), node) in cplane
|
for (endpoint_id, endpoint) in cplane
|
||||||
.nodes
|
.endpoints
|
||||||
.iter()
|
.iter()
|
||||||
.filter(|((node_tenant_id, _), _)| node_tenant_id == &tenant_id)
|
.filter(|(_, endpoint)| endpoint.tenant_id == tenant_id)
|
||||||
{
|
{
|
||||||
let lsn_str = match node.lsn {
|
let lsn_str = match endpoint.mode {
|
||||||
None => {
|
ComputeMode::Static(lsn) => {
|
||||||
// -> primary node
|
// -> read-only endpoint
|
||||||
// Use the LSN at the end of the timeline.
|
|
||||||
timeline_infos
|
|
||||||
.get(&node.timeline_id)
|
|
||||||
.map(|bi| bi.last_record_lsn.to_string())
|
|
||||||
.unwrap_or_else(|| "?".to_string())
|
|
||||||
}
|
|
||||||
Some(lsn) => {
|
|
||||||
// -> read-only node
|
|
||||||
// Use the node's LSN.
|
// Use the node's LSN.
|
||||||
lsn.to_string()
|
lsn.to_string()
|
||||||
}
|
}
|
||||||
|
_ => {
|
||||||
|
// -> primary endpoint or hot replica
|
||||||
|
// Use the LSN at the end of the timeline.
|
||||||
|
timeline_infos
|
||||||
|
.get(&endpoint.timeline_id)
|
||||||
|
.map(|bi| bi.last_record_lsn.to_string())
|
||||||
|
.unwrap_or_else(|| "?".to_string())
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let branch_name = timeline_name_mappings
|
let branch_name = timeline_name_mappings
|
||||||
.get(&TenantTimelineId::new(tenant_id, node.timeline_id))
|
.get(&TenantTimelineId::new(tenant_id, endpoint.timeline_id))
|
||||||
.map(|name| name.as_str())
|
.map(|name| name.as_str())
|
||||||
.unwrap_or("?");
|
.unwrap_or("?");
|
||||||
|
|
||||||
table.add_row([
|
table.add_row([
|
||||||
node_name.as_str(),
|
endpoint_id.as_str(),
|
||||||
&node.address.to_string(),
|
&endpoint.pg_address.to_string(),
|
||||||
&node.timeline_id.to_string(),
|
&endpoint.timeline_id.to_string(),
|
||||||
branch_name,
|
branch_name,
|
||||||
lsn_str.as_str(),
|
lsn_str.as_str(),
|
||||||
node.status(),
|
endpoint.status(),
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -596,10 +607,10 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
.get_one::<String>("branch-name")
|
.get_one::<String>("branch-name")
|
||||||
.map(|s| s.as_str())
|
.map(|s| s.as_str())
|
||||||
.unwrap_or(DEFAULT_BRANCH_NAME);
|
.unwrap_or(DEFAULT_BRANCH_NAME);
|
||||||
let node_name = sub_args
|
let endpoint_id = sub_args
|
||||||
.get_one::<String>("node")
|
.get_one::<String>("endpoint_id")
|
||||||
.map(|node_name| node_name.to_string())
|
.map(String::to_string)
|
||||||
.unwrap_or_else(|| format!("{branch_name}_node"));
|
.unwrap_or_else(|| format!("ep-{branch_name}"));
|
||||||
|
|
||||||
let lsn = sub_args
|
let lsn = sub_args
|
||||||
.get_one::<String>("lsn")
|
.get_one::<String>("lsn")
|
||||||
@@ -610,24 +621,60 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
.get_branch_timeline_id(branch_name, tenant_id)
|
.get_branch_timeline_id(branch_name, tenant_id)
|
||||||
.ok_or_else(|| anyhow!("Found no timeline id for branch name '{branch_name}'"))?;
|
.ok_or_else(|| anyhow!("Found no timeline id for branch name '{branch_name}'"))?;
|
||||||
|
|
||||||
let port: Option<u16> = sub_args.get_one::<u16>("port").copied();
|
let pg_port: Option<u16> = sub_args.get_one::<u16>("pg-port").copied();
|
||||||
|
let http_port: Option<u16> = sub_args.get_one::<u16>("http-port").copied();
|
||||||
let pg_version = sub_args
|
let pg_version = sub_args
|
||||||
.get_one::<u32>("pg-version")
|
.get_one::<u32>("pg-version")
|
||||||
.copied()
|
.copied()
|
||||||
.context("Failed to parse postgres version from the argument string")?;
|
.context("Failed to parse postgres version from the argument string")?;
|
||||||
|
|
||||||
cplane.new_node(tenant_id, &node_name, timeline_id, lsn, port, pg_version)?;
|
let hot_standby = sub_args
|
||||||
|
.get_one::<bool>("hot-standby")
|
||||||
|
.copied()
|
||||||
|
.unwrap_or(false);
|
||||||
|
|
||||||
|
let mode = match (lsn, hot_standby) {
|
||||||
|
(Some(lsn), false) => ComputeMode::Static(lsn),
|
||||||
|
(None, true) => ComputeMode::Replica,
|
||||||
|
(None, false) => ComputeMode::Primary,
|
||||||
|
(Some(_), true) => anyhow::bail!("cannot specify both lsn and hot-standby"),
|
||||||
|
};
|
||||||
|
|
||||||
|
cplane.new_endpoint(
|
||||||
|
&endpoint_id,
|
||||||
|
tenant_id,
|
||||||
|
timeline_id,
|
||||||
|
pg_port,
|
||||||
|
http_port,
|
||||||
|
pg_version,
|
||||||
|
mode,
|
||||||
|
)?;
|
||||||
}
|
}
|
||||||
"start" => {
|
"start" => {
|
||||||
let port: Option<u16> = sub_args.get_one::<u16>("port").copied();
|
let pg_port: Option<u16> = sub_args.get_one::<u16>("pg-port").copied();
|
||||||
let node_name = sub_args
|
let http_port: Option<u16> = sub_args.get_one::<u16>("http-port").copied();
|
||||||
.get_one::<String>("node")
|
let endpoint_id = sub_args
|
||||||
.ok_or_else(|| anyhow!("No node name was provided to start"))?;
|
.get_one::<String>("endpoint_id")
|
||||||
|
.ok_or_else(|| anyhow!("No endpoint ID was provided to start"))?;
|
||||||
|
|
||||||
let node = cplane.nodes.get(&(tenant_id, node_name.to_string()));
|
// If --safekeepers argument is given, use only the listed safekeeper nodes.
|
||||||
|
let safekeepers =
|
||||||
|
if let Some(safekeepers_str) = sub_args.get_one::<String>("safekeepers") {
|
||||||
|
let mut safekeepers: Vec<NodeId> = Vec::new();
|
||||||
|
for sk_id in safekeepers_str.split(',').map(str::trim) {
|
||||||
|
let sk_id = NodeId(u64::from_str(sk_id).map_err(|_| {
|
||||||
|
anyhow!("invalid node ID \"{sk_id}\" in --safekeepers list")
|
||||||
|
})?);
|
||||||
|
safekeepers.push(sk_id);
|
||||||
|
}
|
||||||
|
safekeepers
|
||||||
|
} else {
|
||||||
|
env.safekeepers.iter().map(|sk| sk.id).collect()
|
||||||
|
};
|
||||||
|
|
||||||
let auth_token = if matches!(env.pageserver.auth_type, AuthType::NeonJWT) {
|
let endpoint = cplane.endpoints.get(endpoint_id.as_str());
|
||||||
|
|
||||||
|
let auth_token = if matches!(env.pageserver.pg_auth_type, AuthType::NeonJWT) {
|
||||||
let claims = Claims::new(Some(tenant_id), Scope::Tenant);
|
let claims = Claims::new(Some(tenant_id), Scope::Tenant);
|
||||||
|
|
||||||
Some(env.generate_auth_token(&claims)?)
|
Some(env.generate_auth_token(&claims)?)
|
||||||
@@ -635,9 +682,23 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(node) = node {
|
let hot_standby = sub_args
|
||||||
println!("Starting existing postgres {node_name}...");
|
.get_one::<bool>("hot-standby")
|
||||||
node.start(&auth_token)?;
|
.copied()
|
||||||
|
.unwrap_or(false);
|
||||||
|
|
||||||
|
if let Some(endpoint) = endpoint {
|
||||||
|
match (&endpoint.mode, hot_standby) {
|
||||||
|
(ComputeMode::Static(_), true) => {
|
||||||
|
bail!("Cannot start a node in hot standby mode when it is already configured as a static replica")
|
||||||
|
}
|
||||||
|
(ComputeMode::Primary, true) => {
|
||||||
|
bail!("Cannot start a node as a hot standby replica, it is already configured as primary node")
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
println!("Starting existing endpoint {endpoint_id}...");
|
||||||
|
endpoint.start(&auth_token, safekeepers)?;
|
||||||
} else {
|
} else {
|
||||||
let branch_name = sub_args
|
let branch_name = sub_args
|
||||||
.get_one::<String>("branch-name")
|
.get_one::<String>("branch-name")
|
||||||
@@ -657,32 +718,47 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
.get_one::<u32>("pg-version")
|
.get_one::<u32>("pg-version")
|
||||||
.copied()
|
.copied()
|
||||||
.context("Failed to `pg-version` from the argument string")?;
|
.context("Failed to `pg-version` from the argument string")?;
|
||||||
|
|
||||||
|
let mode = match (lsn, hot_standby) {
|
||||||
|
(Some(lsn), false) => ComputeMode::Static(lsn),
|
||||||
|
(None, true) => ComputeMode::Replica,
|
||||||
|
(None, false) => ComputeMode::Primary,
|
||||||
|
(Some(_), true) => anyhow::bail!("cannot specify both lsn and hot-standby"),
|
||||||
|
};
|
||||||
|
|
||||||
// when used with custom port this results in non obvious behaviour
|
// when used with custom port this results in non obvious behaviour
|
||||||
// port is remembered from first start command, i e
|
// port is remembered from first start command, i e
|
||||||
// start --port X
|
// start --port X
|
||||||
// stop
|
// stop
|
||||||
// start <-- will also use port X even without explicit port argument
|
// start <-- will also use port X even without explicit port argument
|
||||||
println!("Starting new postgres (v{pg_version}) {node_name} on timeline {timeline_id} ...");
|
println!("Starting new endpoint {endpoint_id} (PostgreSQL v{pg_version}) on timeline {timeline_id} ...");
|
||||||
|
|
||||||
let node =
|
let ep = cplane.new_endpoint(
|
||||||
cplane.new_node(tenant_id, node_name, timeline_id, lsn, port, pg_version)?;
|
endpoint_id,
|
||||||
node.start(&auth_token)?;
|
tenant_id,
|
||||||
|
timeline_id,
|
||||||
|
pg_port,
|
||||||
|
http_port,
|
||||||
|
pg_version,
|
||||||
|
mode,
|
||||||
|
)?;
|
||||||
|
ep.start(&auth_token, safekeepers)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
"stop" => {
|
"stop" => {
|
||||||
let node_name = sub_args
|
let endpoint_id = sub_args
|
||||||
.get_one::<String>("node")
|
.get_one::<String>("endpoint_id")
|
||||||
.ok_or_else(|| anyhow!("No node name was provided to stop"))?;
|
.ok_or_else(|| anyhow!("No endpoint ID was provided to stop"))?;
|
||||||
let destroy = sub_args.get_flag("destroy");
|
let destroy = sub_args.get_flag("destroy");
|
||||||
|
|
||||||
let node = cplane
|
let endpoint = cplane
|
||||||
.nodes
|
.endpoints
|
||||||
.get(&(tenant_id, node_name.to_string()))
|
.get(endpoint_id.as_str())
|
||||||
.with_context(|| format!("postgres {node_name} is not found"))?;
|
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
|
||||||
node.stop(destroy)?;
|
endpoint.stop(destroy)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
_ => bail!("Unexpected pg subcommand '{sub_name}'"),
|
_ => bail!("Unexpected endpoint subcommand '{sub_name}'"),
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -801,7 +877,7 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
||||||
// Postgres nodes are not started automatically
|
// Endpoints are not started automatically
|
||||||
|
|
||||||
broker::start_broker_process(env)?;
|
broker::start_broker_process(env)?;
|
||||||
|
|
||||||
@@ -835,10 +911,10 @@ fn handle_stop_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<
|
|||||||
fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) {
|
fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) {
|
||||||
let pageserver = PageServerNode::from_env(env);
|
let pageserver = PageServerNode::from_env(env);
|
||||||
|
|
||||||
// Stop all compute nodes
|
// Stop all endpoints
|
||||||
match ComputeControlPlane::load(env.clone()) {
|
match ComputeControlPlane::load(env.clone()) {
|
||||||
Ok(cplane) => {
|
Ok(cplane) => {
|
||||||
for (_k, node) in cplane.nodes {
|
for (_k, node) in cplane.endpoints {
|
||||||
if let Err(e) = node.stop(false) {
|
if let Err(e) = node.stop(false) {
|
||||||
eprintln!("postgres stop failed: {e:#}");
|
eprintln!("postgres stop failed: {e:#}");
|
||||||
}
|
}
|
||||||
@@ -871,7 +947,9 @@ fn cli() -> Command {
|
|||||||
.help("Name of the branch to be created or used as an alias for other services")
|
.help("Name of the branch to be created or used as an alias for other services")
|
||||||
.required(false);
|
.required(false);
|
||||||
|
|
||||||
let pg_node_arg = Arg::new("node").help("Postgres node name").required(false);
|
let endpoint_id_arg = Arg::new("endpoint_id")
|
||||||
|
.help("Postgres endpoint id")
|
||||||
|
.required(false);
|
||||||
|
|
||||||
let safekeeper_id_arg = Arg::new("id").help("safekeeper id").required(false);
|
let safekeeper_id_arg = Arg::new("id").help("safekeeper id").required(false);
|
||||||
|
|
||||||
@@ -892,11 +970,22 @@ fn cli() -> Command {
|
|||||||
.value_parser(value_parser!(u32))
|
.value_parser(value_parser!(u32))
|
||||||
.default_value(DEFAULT_PG_VERSION);
|
.default_value(DEFAULT_PG_VERSION);
|
||||||
|
|
||||||
let port_arg = Arg::new("port")
|
let pg_port_arg = Arg::new("pg-port")
|
||||||
.long("port")
|
.long("pg-port")
|
||||||
.required(false)
|
.required(false)
|
||||||
.value_parser(value_parser!(u16))
|
.value_parser(value_parser!(u16))
|
||||||
.value_name("port");
|
.value_name("pg-port");
|
||||||
|
|
||||||
|
let http_port_arg = Arg::new("http-port")
|
||||||
|
.long("http-port")
|
||||||
|
.required(false)
|
||||||
|
.value_parser(value_parser!(u16))
|
||||||
|
.value_name("http-port");
|
||||||
|
|
||||||
|
let safekeepers_arg = Arg::new("safekeepers")
|
||||||
|
.long("safekeepers")
|
||||||
|
.required(false)
|
||||||
|
.value_name("safekeepers");
|
||||||
|
|
||||||
let stop_mode_arg = Arg::new("stop-mode")
|
let stop_mode_arg = Arg::new("stop-mode")
|
||||||
.short('m')
|
.short('m')
|
||||||
@@ -918,6 +1007,12 @@ fn cli() -> Command {
|
|||||||
.help("Specify Lsn on the timeline to start from. By default, end of the timeline would be used.")
|
.help("Specify Lsn on the timeline to start from. By default, end of the timeline would be used.")
|
||||||
.required(false);
|
.required(false);
|
||||||
|
|
||||||
|
let hot_standby_arg = Arg::new("hot-standby")
|
||||||
|
.value_parser(value_parser!(bool))
|
||||||
|
.long("hot-standby")
|
||||||
|
.help("If set, the node will be a hot replica on the specified timeline")
|
||||||
|
.required(false);
|
||||||
|
|
||||||
Command::new("Neon CLI")
|
Command::new("Neon CLI")
|
||||||
.arg_required_else_help(true)
|
.arg_required_else_help(true)
|
||||||
.version(GIT_VERSION)
|
.version(GIT_VERSION)
|
||||||
@@ -1025,37 +1120,42 @@ fn cli() -> Command {
                )
        )
        .subcommand(
-           Command::new("pg")
+           Command::new("endpoint")
                .arg_required_else_help(true)
                .about("Manage postgres instances")
                .subcommand(Command::new("list").arg(tenant_id_arg.clone()))
                .subcommand(Command::new("create")
-                   .about("Create a postgres compute node")
+                   .about("Create a compute endpoint")
-                   .arg(pg_node_arg.clone())
+                   .arg(endpoint_id_arg.clone())
                    .arg(branch_name_arg.clone())
                    .arg(tenant_id_arg.clone())
                    .arg(lsn_arg.clone())
-                   .arg(port_arg.clone())
+                   .arg(pg_port_arg.clone())
+                   .arg(http_port_arg.clone())
                    .arg(
                        Arg::new("config-only")
-                           .help("Don't do basebackup, create compute node with only config files")
+                           .help("Don't do basebackup, create endpoint directory with only config files")
                            .long("config-only")
                            .required(false))
                    .arg(pg_version_arg.clone())
+                   .arg(hot_standby_arg.clone())
                )
                .subcommand(Command::new("start")
-                   .about("Start a postgres compute node.\n This command actually creates new node from scratch, but preserves existing config files")
+                   .about("Start postgres.\n If the endpoint doesn't exist yet, it is created.")
-                   .arg(pg_node_arg.clone())
+                   .arg(endpoint_id_arg.clone())
                    .arg(tenant_id_arg.clone())
                    .arg(branch_name_arg)
                    .arg(timeline_id_arg)
                    .arg(lsn_arg)
-                   .arg(port_arg)
+                   .arg(pg_port_arg)
+                   .arg(http_port_arg)
                    .arg(pg_version_arg)
+                   .arg(hot_standby_arg)
+                   .arg(safekeepers_arg)
                )
                .subcommand(
                    Command::new("stop")
-                       .arg(pg_node_arg)
+                       .arg(endpoint_id_arg)
                        .arg(tenant_id_arg)
                        .arg(
                            Arg::new("destroy")
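For reference, a minimal sketch (not part of this commit) of how the renamed `endpoint` subcommand could be dispatched from `main`; the handler name and signature are assumptions standing in for code that lives outside this excerpt:

```rust
use clap::ArgMatches;

// Hypothetical dispatch fragment; the real handler added in this PR is not shown here.
fn dispatch(matches: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result<()> {
    match matches.subcommand() {
        Some(("endpoint", endpoint_match)) => handle_endpoint(endpoint_match, env),
        _ => Ok(()),
    }
}

fn handle_endpoint(_m: &ArgMatches, _env: &local_env::LocalEnv) -> anyhow::Result<()> {
    // Placeholder: the actual implementation creates, starts, or stops endpoints.
    Ok(())
}
```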
@@ -1067,6 +1167,13 @@ fn cli() -> Command {
                )

        )
+       // Obsolete old name for 'endpoint'. We now just print an error if it's used.
+       .subcommand(
+           Command::new("pg")
+               .hide(true)
+               .arg(Arg::new("ignore-rest").allow_hyphen_values(true).num_args(0..).required(false))
+               .trailing_var_arg(true)
+       )
        .subcommand(
            Command::new("start")
                .about("Start page server and safekeepers")
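The diff only states that the old name now produces an error; as a hedged sketch, the corresponding check in a function returning `anyhow::Result` might look like this (the wording of the message is an assumption):

```rust
// Inside a function that returns anyhow::Result<()>.
match cli().get_matches().subcommand() {
    Some(("pg", _)) => anyhow::bail!("the 'pg' command has been renamed to 'endpoint'"),
    _ => {}
}
```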
@@ -1,3 +1,9 @@
+//! Code to manage the storage broker
+//!
+//! In the local test environment, the data for each safekeeper is stored in
+//!
+//! .neon/safekeepers/<safekeeper id>
+//!
 use anyhow::Context;

 use std::path::PathBuf;
@@ -1,555 +0,0 @@
use std::collections::BTreeMap;
|
|
||||||
use std::fs::{self, File};
|
|
||||||
use std::io::Write;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use std::net::TcpStream;
|
|
||||||
use std::os::unix::fs::PermissionsExt;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::process::{Command, Stdio};
|
|
||||||
use std::str::FromStr;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
|
||||||
use utils::{
|
|
||||||
id::{TenantId, TimelineId},
|
|
||||||
lsn::Lsn,
|
|
||||||
postgres_backend::AuthType,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::local_env::{LocalEnv, DEFAULT_PG_VERSION};
|
|
||||||
use crate::pageserver::PageServerNode;
|
|
||||||
use crate::postgresql_conf::PostgresConf;
|
|
||||||
|
|
||||||
//
|
|
||||||
// ComputeControlPlane
|
|
||||||
//
|
|
||||||
pub struct ComputeControlPlane {
|
|
||||||
base_port: u16,
|
|
||||||
pageserver: Arc<PageServerNode>,
|
|
||||||
pub nodes: BTreeMap<(TenantId, String), Arc<PostgresNode>>,
|
|
||||||
env: LocalEnv,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ComputeControlPlane {
|
|
||||||
// Load current nodes with ports from data directories on disk
|
|
||||||
// Directory structure has the following layout:
|
|
||||||
// pgdatadirs
|
|
||||||
// |- tenants
|
|
||||||
// | |- <tenant_id>
|
|
||||||
// | | |- <node name>
|
|
||||||
pub fn load(env: LocalEnv) -> Result<ComputeControlPlane> {
|
|
||||||
let pageserver = Arc::new(PageServerNode::from_env(&env));
|
|
||||||
|
|
||||||
let mut nodes = BTreeMap::default();
|
|
||||||
let pgdatadirspath = &env.pg_data_dirs_path();
|
|
||||||
|
|
||||||
for tenant_dir in fs::read_dir(pgdatadirspath)
|
|
||||||
.with_context(|| format!("failed to list {}", pgdatadirspath.display()))?
|
|
||||||
{
|
|
||||||
let tenant_dir = tenant_dir?;
|
|
||||||
for timeline_dir in fs::read_dir(tenant_dir.path())
|
|
||||||
.with_context(|| format!("failed to list {}", tenant_dir.path().display()))?
|
|
||||||
{
|
|
||||||
let node = PostgresNode::from_dir_entry(timeline_dir?, &env, &pageserver)?;
|
|
||||||
nodes.insert((node.tenant_id, node.name.clone()), Arc::new(node));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(ComputeControlPlane {
|
|
||||||
base_port: 55431,
|
|
||||||
pageserver,
|
|
||||||
nodes,
|
|
||||||
env,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_port(&mut self) -> u16 {
|
|
||||||
1 + self
|
|
||||||
.nodes
|
|
||||||
.values()
|
|
||||||
.map(|node| node.address.port())
|
|
||||||
.max()
|
|
||||||
.unwrap_or(self.base_port)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn new_node(
|
|
||||||
&mut self,
|
|
||||||
tenant_id: TenantId,
|
|
||||||
name: &str,
|
|
||||||
timeline_id: TimelineId,
|
|
||||||
lsn: Option<Lsn>,
|
|
||||||
port: Option<u16>,
|
|
||||||
pg_version: u32,
|
|
||||||
) -> Result<Arc<PostgresNode>> {
|
|
||||||
let port = port.unwrap_or_else(|| self.get_port());
|
|
||||||
let node = Arc::new(PostgresNode {
|
|
||||||
name: name.to_owned(),
|
|
||||||
address: SocketAddr::new("127.0.0.1".parse().unwrap(), port),
|
|
||||||
env: self.env.clone(),
|
|
||||||
pageserver: Arc::clone(&self.pageserver),
|
|
||||||
is_test: false,
|
|
||||||
timeline_id,
|
|
||||||
lsn,
|
|
||||||
tenant_id,
|
|
||||||
uses_wal_proposer: false,
|
|
||||||
pg_version,
|
|
||||||
});
|
|
||||||
|
|
||||||
node.create_pgdata()?;
|
|
||||||
node.setup_pg_conf(self.env.pageserver.auth_type)?;
|
|
||||||
|
|
||||||
self.nodes
|
|
||||||
.insert((tenant_id, node.name.clone()), Arc::clone(&node));
|
|
||||||
|
|
||||||
Ok(node)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct PostgresNode {
|
|
||||||
pub address: SocketAddr,
|
|
||||||
name: String,
|
|
||||||
pub env: LocalEnv,
|
|
||||||
pageserver: Arc<PageServerNode>,
|
|
||||||
is_test: bool,
|
|
||||||
pub timeline_id: TimelineId,
|
|
||||||
pub lsn: Option<Lsn>, // if it's a read-only node. None for primary
|
|
||||||
pub tenant_id: TenantId,
|
|
||||||
uses_wal_proposer: bool,
|
|
||||||
pg_version: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PostgresNode {
|
|
||||||
fn from_dir_entry(
|
|
||||||
entry: std::fs::DirEntry,
|
|
||||||
env: &LocalEnv,
|
|
||||||
pageserver: &Arc<PageServerNode>,
|
|
||||||
) -> Result<PostgresNode> {
|
|
||||||
if !entry.file_type()?.is_dir() {
|
|
||||||
anyhow::bail!(
|
|
||||||
"PostgresNode::from_dir_entry failed: '{}' is not a directory",
|
|
||||||
entry.path().display()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// parse data directory name
|
|
||||||
let fname = entry.file_name();
|
|
||||||
let name = fname.to_str().unwrap().to_string();
|
|
||||||
|
|
||||||
// Read config file into memory
|
|
||||||
let cfg_path = entry.path().join("postgresql.conf");
|
|
||||||
let cfg_path_str = cfg_path.to_string_lossy();
|
|
||||||
let mut conf_file = File::open(&cfg_path)
|
|
||||||
.with_context(|| format!("failed to open config file in {}", cfg_path_str))?;
|
|
||||||
let conf = PostgresConf::read(&mut conf_file)
|
|
||||||
.with_context(|| format!("failed to read config file in {}", cfg_path_str))?;
|
|
||||||
|
|
||||||
// Read a few options from the config file
|
|
||||||
let context = format!("in config file {}", cfg_path_str);
|
|
||||||
let port: u16 = conf.parse_field("port", &context)?;
|
|
||||||
let timeline_id: TimelineId = conf.parse_field("neon.timeline_id", &context)?;
|
|
||||||
let tenant_id: TenantId = conf.parse_field("neon.tenant_id", &context)?;
|
|
||||||
let uses_wal_proposer = conf.get("neon.safekeepers").is_some();
|
|
||||||
|
|
||||||
// Read postgres version from PG_VERSION file to determine which postgres version binary to use.
|
|
||||||
// If it doesn't exist, assume broken data directory and use default pg version.
|
|
||||||
let pg_version_path = entry.path().join("PG_VERSION");
|
|
||||||
|
|
||||||
let pg_version_str =
|
|
||||||
fs::read_to_string(pg_version_path).unwrap_or_else(|_| DEFAULT_PG_VERSION.to_string());
|
|
||||||
let pg_version = u32::from_str(&pg_version_str)?;
|
|
||||||
|
|
||||||
// parse recovery_target_lsn, if any
|
|
||||||
let recovery_target_lsn: Option<Lsn> =
|
|
||||||
conf.parse_field_optional("recovery_target_lsn", &context)?;
|
|
||||||
|
|
||||||
// ok now
|
|
||||||
Ok(PostgresNode {
|
|
||||||
address: SocketAddr::new("127.0.0.1".parse().unwrap(), port),
|
|
||||||
name,
|
|
||||||
env: env.clone(),
|
|
||||||
pageserver: Arc::clone(pageserver),
|
|
||||||
is_test: false,
|
|
||||||
timeline_id,
|
|
||||||
lsn: recovery_target_lsn,
|
|
||||||
tenant_id,
|
|
||||||
uses_wal_proposer,
|
|
||||||
pg_version,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn sync_safekeepers(&self, auth_token: &Option<String>, pg_version: u32) -> Result<Lsn> {
|
|
||||||
let pg_path = self.env.pg_bin_dir(pg_version)?.join("postgres");
|
|
||||||
let mut cmd = Command::new(pg_path);
|
|
||||||
|
|
||||||
cmd.arg("--sync-safekeepers")
|
|
||||||
.env_clear()
|
|
||||||
.env(
|
|
||||||
"LD_LIBRARY_PATH",
|
|
||||||
self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
|
|
||||||
)
|
|
||||||
.env(
|
|
||||||
"DYLD_LIBRARY_PATH",
|
|
||||||
self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
|
|
||||||
)
|
|
||||||
.env("PGDATA", self.pgdata().to_str().unwrap())
|
|
||||||
.stdout(Stdio::piped())
|
|
||||||
// Comment this to avoid capturing stderr (useful if command hangs)
|
|
||||||
.stderr(Stdio::piped());
|
|
||||||
|
|
||||||
if let Some(token) = auth_token {
|
|
||||||
cmd.env("NEON_AUTH_TOKEN", token);
|
|
||||||
}
|
|
||||||
|
|
||||||
let sync_handle = cmd
|
|
||||||
.spawn()
|
|
||||||
.expect("postgres --sync-safekeepers failed to start");
|
|
||||||
|
|
||||||
let sync_output = sync_handle
|
|
||||||
.wait_with_output()
|
|
||||||
.expect("postgres --sync-safekeepers failed");
|
|
||||||
if !sync_output.status.success() {
|
|
||||||
anyhow::bail!(
|
|
||||||
"sync-safekeepers failed: '{}'",
|
|
||||||
String::from_utf8_lossy(&sync_output.stderr)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let lsn = Lsn::from_str(std::str::from_utf8(&sync_output.stdout)?.trim())?;
|
|
||||||
println!("Safekeepers synced on {}", lsn);
|
|
||||||
Ok(lsn)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get basebackup from the pageserver as a tar archive and extract it
|
|
||||||
/// to the `self.pgdata()` directory.
|
|
||||||
fn do_basebackup(&self, lsn: Option<Lsn>) -> Result<()> {
|
|
||||||
println!(
|
|
||||||
"Extracting base backup to create postgres instance: path={} port={}",
|
|
||||||
self.pgdata().display(),
|
|
||||||
self.address.port()
|
|
||||||
);
|
|
||||||
|
|
||||||
let sql = if let Some(lsn) = lsn {
|
|
||||||
format!("basebackup {} {} {}", self.tenant_id, self.timeline_id, lsn)
|
|
||||||
} else {
|
|
||||||
format!("basebackup {} {}", self.tenant_id, self.timeline_id)
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut client = self
|
|
||||||
.pageserver
|
|
||||||
.page_server_psql_client()
|
|
||||||
.context("connecting to page server failed")?;
|
|
||||||
|
|
||||||
let copyreader = client
|
|
||||||
.copy_out(sql.as_str())
|
|
||||||
.context("page server 'basebackup' command failed")?;
|
|
||||||
|
|
||||||
// Read the archive directly from the `CopyOutReader`
|
|
||||||
//
|
|
||||||
// Set `ignore_zeros` so that unpack() reads all the Copy data and
|
|
||||||
// doesn't stop at the end-of-archive marker. Otherwise, if the server
|
|
||||||
// sends an Error after finishing the tarball, we will not notice it.
|
|
||||||
let mut ar = tar::Archive::new(copyreader);
|
|
||||||
ar.set_ignore_zeros(true);
|
|
||||||
ar.unpack(&self.pgdata())
|
|
||||||
.context("extracting base backup failed")?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn create_pgdata(&self) -> Result<()> {
|
|
||||||
fs::create_dir_all(self.pgdata()).with_context(|| {
|
|
||||||
format!(
|
|
||||||
"could not create data directory {}",
|
|
||||||
self.pgdata().display()
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
fs::set_permissions(self.pgdata().as_path(), fs::Permissions::from_mode(0o700))
|
|
||||||
.with_context(|| {
|
|
||||||
format!(
|
|
||||||
"could not set permissions in data directory {}",
|
|
||||||
self.pgdata().display()
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write postgresql.conf with default configuration
|
|
||||||
// and PG_VERSION file to the data directory of a new node.
|
|
||||||
fn setup_pg_conf(&self, auth_type: AuthType) -> Result<()> {
|
|
||||||
let mut conf = PostgresConf::new();
|
|
||||||
conf.append("max_wal_senders", "10");
|
|
||||||
conf.append("wal_log_hints", "off");
|
|
||||||
conf.append("max_replication_slots", "10");
|
|
||||||
conf.append("hot_standby", "on");
|
|
||||||
conf.append("shared_buffers", "1MB");
|
|
||||||
conf.append("fsync", "off");
|
|
||||||
conf.append("max_connections", "100");
|
|
||||||
conf.append("wal_level", "replica");
|
|
||||||
// wal_sender_timeout is the maximum time to wait for WAL replication.
|
|
||||||
// It also defines how often the walreciever will send a feedback message to the wal sender.
|
|
||||||
conf.append("wal_sender_timeout", "5s");
|
|
||||||
conf.append("listen_addresses", &self.address.ip().to_string());
|
|
||||||
conf.append("port", &self.address.port().to_string());
|
|
||||||
conf.append("wal_keep_size", "0");
|
|
||||||
// walproposer panics when basebackup is invalid, it is pointless to restart in this case.
|
|
||||||
conf.append("restart_after_crash", "off");
|
|
||||||
|
|
||||||
// Configure the node to fetch pages from pageserver
|
|
||||||
let pageserver_connstr = {
|
|
||||||
let config = &self.pageserver.pg_connection_config;
|
|
||||||
let (host, port) = (config.host(), config.port());
|
|
||||||
|
|
||||||
// Set up authentication
|
|
||||||
//
|
|
||||||
// $NEON_AUTH_TOKEN will be replaced with value from environment
|
|
||||||
// variable during compute pg startup. It is done this way because
|
|
||||||
// otherwise user will be able to retrieve the value using SHOW
|
|
||||||
// command or pg_settings
|
|
||||||
let password = if let AuthType::NeonJWT = auth_type {
|
|
||||||
"$NEON_AUTH_TOKEN"
|
|
||||||
} else {
|
|
||||||
""
|
|
||||||
};
|
|
||||||
// NOTE avoiding spaces in connection string, because it is less error prone if we forward it somewhere.
|
|
||||||
// Also note that not all parameters are supported here. Because in compute we substitute $NEON_AUTH_TOKEN
|
|
||||||
// We parse this string and build it back with token from env var, and for simplicity rebuild
|
|
||||||
// uses only needed variables namely host, port, user, password.
|
|
||||||
format!("postgresql://no_user:{password}@{host}:{port}")
|
|
||||||
};
|
|
||||||
conf.append("shared_preload_libraries", "neon");
|
|
||||||
conf.append_line("");
|
|
||||||
conf.append("neon.pageserver_connstring", &pageserver_connstr);
|
|
||||||
if let AuthType::NeonJWT = auth_type {
|
|
||||||
conf.append("neon.safekeeper_token_env", "$NEON_AUTH_TOKEN");
|
|
||||||
}
|
|
||||||
conf.append("neon.tenant_id", &self.tenant_id.to_string());
|
|
||||||
conf.append("neon.timeline_id", &self.timeline_id.to_string());
|
|
||||||
if let Some(lsn) = self.lsn {
|
|
||||||
conf.append("recovery_target_lsn", &lsn.to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
conf.append_line("");
|
|
||||||
// Configure backpressure
|
|
||||||
// - Replication write lag depends on how fast the walreceiver can process incoming WAL.
|
|
||||||
// This lag determines latency of get_page_at_lsn. Speed of applying WAL is about 10MB/sec,
|
|
||||||
// so to avoid expiration of 1 minute timeout, this lag should not be larger than 600MB.
|
|
||||||
// Actually latency should be much smaller (better if < 1sec). But we assume that recently
|
|
||||||
// updates pages are not requested from pageserver.
|
|
||||||
// - Replication flush lag depends on speed of persisting data by checkpointer (creation of
|
|
||||||
// delta/image layers) and advancing disk_consistent_lsn. Safekeepers are able to
|
|
||||||
// remove/archive WAL only beyond disk_consistent_lsn. Too large a lag can cause long
|
|
||||||
// recovery time (in case of pageserver crash) and disk space overflow at safekeepers.
|
|
||||||
// - Replication apply lag depends on speed of uploading changes to S3 by uploader thread.
|
|
||||||
// To be able to restore database in case of pageserver node crash, safekeeper should not
|
|
||||||
// remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers
|
|
||||||
// (if they are not able to upload WAL to S3).
|
|
||||||
conf.append("max_replication_write_lag", "15MB");
|
|
||||||
conf.append("max_replication_flush_lag", "10GB");
|
|
||||||
|
|
||||||
if !self.env.safekeepers.is_empty() {
|
|
||||||
// Configure the node to connect to the safekeepers
|
|
||||||
conf.append("synchronous_standby_names", "walproposer");
|
|
||||||
|
|
||||||
let safekeepers = self
|
|
||||||
.env
|
|
||||||
.safekeepers
|
|
||||||
.iter()
|
|
||||||
.map(|sk| format!("localhost:{}", sk.pg_port))
|
|
||||||
.collect::<Vec<String>>()
|
|
||||||
.join(",");
|
|
||||||
conf.append("neon.safekeepers", &safekeepers);
|
|
||||||
} else {
|
|
||||||
// We only use setup without safekeepers for tests,
|
|
||||||
// and don't care about data durability on pageserver,
|
|
||||||
// so set more relaxed synchronous_commit.
|
|
||||||
conf.append("synchronous_commit", "remote_write");
|
|
||||||
|
|
||||||
// Configure the node to stream WAL directly to the pageserver
|
|
||||||
// This isn't really a supported configuration, but can be useful for
|
|
||||||
// testing.
|
|
||||||
conf.append("synchronous_standby_names", "pageserver");
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut file = File::create(self.pgdata().join("postgresql.conf"))?;
|
|
||||||
file.write_all(conf.to_string().as_bytes())?;
|
|
||||||
|
|
||||||
let mut file = File::create(self.pgdata().join("PG_VERSION"))?;
|
|
||||||
file.write_all(self.pg_version.to_string().as_bytes())?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn load_basebackup(&self, auth_token: &Option<String>) -> Result<()> {
|
|
||||||
let backup_lsn = if let Some(lsn) = self.lsn {
|
|
||||||
Some(lsn)
|
|
||||||
} else if self.uses_wal_proposer {
|
|
||||||
// LSN 0 means that it is bootstrap and we need to download just
|
|
||||||
// latest data from the pageserver. That is a bit clumsy but whole bootstrap
|
|
||||||
// procedure evolves quite actively right now, so let's think about it again
|
|
||||||
// when things would be more stable (TODO).
|
|
||||||
let lsn = self.sync_safekeepers(auth_token, self.pg_version)?;
|
|
||||||
if lsn == Lsn(0) {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(lsn)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
self.do_basebackup(backup_lsn)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn pgdata(&self) -> PathBuf {
|
|
||||||
self.env.pg_data_dir(&self.tenant_id, &self.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn status(&self) -> &str {
|
|
||||||
let timeout = Duration::from_millis(300);
|
|
||||||
let has_pidfile = self.pgdata().join("postmaster.pid").exists();
|
|
||||||
let can_connect = TcpStream::connect_timeout(&self.address, timeout).is_ok();
|
|
||||||
|
|
||||||
match (has_pidfile, can_connect) {
|
|
||||||
(true, true) => "running",
|
|
||||||
(false, false) => "stopped",
|
|
||||||
(true, false) => "crashed",
|
|
||||||
(false, true) => "running, no pidfile",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
|
|
||||||
let pg_ctl_path = self.env.pg_bin_dir(self.pg_version)?.join("pg_ctl");
|
|
||||||
let mut cmd = Command::new(pg_ctl_path);
|
|
||||||
cmd.args(
|
|
||||||
[
|
|
||||||
&[
|
|
||||||
"-D",
|
|
||||||
self.pgdata().to_str().unwrap(),
|
|
||||||
"-l",
|
|
||||||
self.pgdata().join("pg.log").to_str().unwrap(),
|
|
||||||
"-w", //wait till pg_ctl actually does what was asked
|
|
||||||
],
|
|
||||||
args,
|
|
||||||
]
|
|
||||||
.concat(),
|
|
||||||
)
|
|
||||||
.env_clear()
|
|
||||||
.env(
|
|
||||||
"LD_LIBRARY_PATH",
|
|
||||||
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
|
||||||
)
|
|
||||||
.env(
|
|
||||||
"DYLD_LIBRARY_PATH",
|
|
||||||
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
|
||||||
);
|
|
||||||
if let Some(token) = auth_token {
|
|
||||||
cmd.env("NEON_AUTH_TOKEN", token);
|
|
||||||
}
|
|
||||||
|
|
||||||
let pg_ctl = cmd.output().context("pg_ctl failed")?;
|
|
||||||
if !pg_ctl.status.success() {
|
|
||||||
anyhow::bail!(
|
|
||||||
"pg_ctl failed, exit code: {}, stdout: {}, stderr: {}",
|
|
||||||
pg_ctl.status,
|
|
||||||
String::from_utf8_lossy(&pg_ctl.stdout),
|
|
||||||
String::from_utf8_lossy(&pg_ctl.stderr),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn start(&self, auth_token: &Option<String>) -> Result<()> {
|
|
||||||
// Bail if the node already running.
|
|
||||||
if self.status() == "running" {
|
|
||||||
anyhow::bail!("The node is already running");
|
|
||||||
}
|
|
||||||
|
|
||||||
// 1. We always start compute node from scratch, so
|
|
||||||
// if old dir exists, preserve 'postgresql.conf' and drop the directory
|
|
||||||
let postgresql_conf_path = self.pgdata().join("postgresql.conf");
|
|
||||||
let postgresql_conf = fs::read(&postgresql_conf_path).with_context(|| {
|
|
||||||
format!(
|
|
||||||
"failed to read config file in {}",
|
|
||||||
postgresql_conf_path.to_str().unwrap()
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
fs::remove_dir_all(self.pgdata())?;
|
|
||||||
self.create_pgdata()?;
|
|
||||||
|
|
||||||
// 2. Bring back config files
|
|
||||||
fs::write(&postgresql_conf_path, postgresql_conf)?;
|
|
||||||
|
|
||||||
// 3. Load basebackup
|
|
||||||
self.load_basebackup(auth_token)?;
|
|
||||||
|
|
||||||
if self.lsn.is_some() {
|
|
||||||
File::create(self.pgdata().join("standby.signal"))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4. Finally start the compute node postgres
|
|
||||||
println!("Starting postgres node at '{}'", self.connstr());
|
|
||||||
self.pg_ctl(&["start"], auth_token)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn restart(&self, auth_token: &Option<String>) -> Result<()> {
|
|
||||||
self.pg_ctl(&["restart"], auth_token)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn stop(&self, destroy: bool) -> Result<()> {
|
|
||||||
// If we are going to destroy data directory,
|
|
||||||
// use immediate shutdown mode, otherwise,
|
|
||||||
// shutdown gracefully to leave the data directory sane.
|
|
||||||
//
|
|
||||||
// Compute node always starts from scratch, so stop
|
|
||||||
// without destroy only used for testing and debugging.
|
|
||||||
//
|
|
||||||
if destroy {
|
|
||||||
self.pg_ctl(&["-m", "immediate", "stop"], &None)?;
|
|
||||||
println!(
|
|
||||||
"Destroying postgres data directory '{}'",
|
|
||||||
self.pgdata().to_str().unwrap()
|
|
||||||
);
|
|
||||||
fs::remove_dir_all(self.pgdata())?;
|
|
||||||
} else {
|
|
||||||
self.pg_ctl(&["stop"], &None)?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn connstr(&self) -> String {
|
|
||||||
format!(
|
|
||||||
"host={} port={} user={} dbname={}",
|
|
||||||
self.address.ip(),
|
|
||||||
self.address.port(),
|
|
||||||
"cloud_admin",
|
|
||||||
"postgres"
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX: cache that in control plane
|
|
||||||
pub fn whoami(&self) -> String {
|
|
||||||
let output = Command::new("whoami")
|
|
||||||
.output()
|
|
||||||
.expect("failed to execute whoami");
|
|
||||||
|
|
||||||
assert!(output.status.success(), "whoami failed");
|
|
||||||
|
|
||||||
String::from_utf8(output.stdout).unwrap().trim().to_string()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for PostgresNode {
|
|
||||||
// destructor to clean up state after test is done
|
|
||||||
// XXX: we may detect failed test by setting some flag in catch_unwind()
|
|
||||||
// and checking it here. But let just clean datadirs on start.
|
|
||||||
fn drop(&mut self) {
|
|
||||||
if self.is_test {
|
|
||||||
let _ = self.stop(true);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
control_plane/src/endpoint.rs (new file, 613 lines)
@@ -0,0 +1,613 @@
|
|||||||
|
//! Code to manage compute endpoints
|
||||||
|
//!
|
||||||
|
//! In the local test environment, the data for each endpoint is stored in
|
||||||
|
//!
|
||||||
|
//! .neon/endpoints/<endpoint id>
|
||||||
|
//!
|
||||||
|
//! Some basic information about the endpoint, like the tenant and timeline IDs,
|
||||||
|
//! is stored in the `endpoint.json` file. The `endpoint.json` file is created
|
||||||
|
//! when the endpoint is created, and doesn't change afterwards.
|
||||||
|
//!
|
||||||
|
//! The endpoint is managed by the `compute_ctl` binary. When an endpoint is
|
||||||
|
//! started, we launch `compute_ctl`. It synchronizes the safekeepers, downloads
|
||||||
|
//! the basebackup from the pageserver to initialize the data directory, and
|
||||||
|
//! finally launches the PostgreSQL process. It watches the PostgreSQL process
|
||||||
|
//! until it exits.
|
||||||
|
//!
|
||||||
|
//! When an endpoint is created, a `postgresql.conf` file is also created in
|
||||||
|
//! the endpoint's directory. The file can be modified before starting PostgreSQL.
|
||||||
|
//! However, the `postgresql.conf` file in the endpoint directory is not used directly
|
||||||
|
//! by PostgreSQL. It is passed to `compute_ctl`, and `compute_ctl` writes another
|
||||||
|
//! copy of it in the data directory.
|
||||||
|
//!
|
||||||
|
//! Directory contents:
|
||||||
|
//!
|
||||||
|
//! ```ignore
|
||||||
|
//! .neon/endpoints/main/
|
||||||
|
//! compute.log - log output of `compute_ctl` and `postgres`
|
||||||
|
//! endpoint.json - serialized `EndpointConf` struct
|
||||||
|
//! postgresql.conf - postgresql settings
|
||||||
|
//! spec.json - passed to `compute_ctl`
|
||||||
|
//! pgdata/
|
||||||
|
//! postgresql.conf - copy of postgresql.conf created by `compute_ctl`
|
||||||
|
//! zenith.signal
|
||||||
|
//! <other PostgreSQL files>
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
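To make the layout above concrete, here is a small self-contained sketch (not part of the commit) that resolves the files of an endpoint named "main"; the `base` path stands in for the `.neon` directory of a local environment:

```rust
use std::path::{Path, PathBuf};

// Given the directory layout described in the module comment, return the
// paths that matter for one endpoint. "main" is just an example id.
fn endpoint_files(base: &Path) -> (PathBuf, PathBuf, PathBuf) {
    let ep = base.join("endpoints").join("main");
    (
        ep.join("endpoint.json"), // serialized EndpointConf
        ep.join("spec.json"),     // spec passed to compute_ctl
        ep.join("pgdata"),        // data directory populated by compute_ctl
    )
}
```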
|
use std::collections::BTreeMap;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::net::TcpStream;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::process::Command;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use anyhow::{anyhow, bail, Context, Result};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_with::{serde_as, DisplayFromStr};
|
||||||
|
use utils::id::{NodeId, TenantId, TimelineId};
|
||||||
|
|
||||||
|
use crate::local_env::LocalEnv;
|
||||||
|
use crate::pageserver::PageServerNode;
|
||||||
|
use crate::postgresql_conf::PostgresConf;
|
||||||
|
|
||||||
|
use compute_api::responses::{ComputeState, ComputeStatus};
|
||||||
|
use compute_api::spec::{Cluster, ComputeMode, ComputeSpec};
|
||||||
|
|
||||||
|
// contents of an endpoint.json file
|
||||||
|
#[serde_as]
|
||||||
|
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||||
|
pub struct EndpointConf {
|
||||||
|
endpoint_id: String,
|
||||||
|
#[serde_as(as = "DisplayFromStr")]
|
||||||
|
tenant_id: TenantId,
|
||||||
|
#[serde_as(as = "DisplayFromStr")]
|
||||||
|
timeline_id: TimelineId,
|
||||||
|
mode: ComputeMode,
|
||||||
|
pg_port: u16,
|
||||||
|
http_port: u16,
|
||||||
|
pg_version: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// ComputeControlPlane
|
||||||
|
//
|
||||||
|
pub struct ComputeControlPlane {
|
||||||
|
base_port: u16,
|
||||||
|
|
||||||
|
// endpoint ID is the key
|
||||||
|
pub endpoints: BTreeMap<String, Arc<Endpoint>>,
|
||||||
|
|
||||||
|
env: LocalEnv,
|
||||||
|
pageserver: Arc<PageServerNode>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ComputeControlPlane {
|
||||||
|
// Load current endpoints from the endpoints/ subdirectories
|
||||||
|
pub fn load(env: LocalEnv) -> Result<ComputeControlPlane> {
|
||||||
|
let pageserver = Arc::new(PageServerNode::from_env(&env));
|
||||||
|
|
||||||
|
let mut endpoints = BTreeMap::default();
|
||||||
|
for endpoint_dir in std::fs::read_dir(env.endpoints_path())
|
||||||
|
.with_context(|| format!("failed to list {}", env.endpoints_path().display()))?
|
||||||
|
{
|
||||||
|
let ep = Endpoint::from_dir_entry(endpoint_dir?, &env, &pageserver)?;
|
||||||
|
endpoints.insert(ep.endpoint_id.clone(), Arc::new(ep));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(ComputeControlPlane {
|
||||||
|
base_port: 55431,
|
||||||
|
endpoints,
|
||||||
|
env,
|
||||||
|
pageserver,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_port(&mut self) -> u16 {
|
||||||
|
1 + self
|
||||||
|
.endpoints
|
||||||
|
.values()
|
||||||
|
.map(|ep| std::cmp::max(ep.pg_address.port(), ep.http_address.port()))
|
||||||
|
.max()
|
||||||
|
.unwrap_or(self.base_port)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
pub fn new_endpoint(
|
||||||
|
&mut self,
|
||||||
|
endpoint_id: &str,
|
||||||
|
tenant_id: TenantId,
|
||||||
|
timeline_id: TimelineId,
|
||||||
|
pg_port: Option<u16>,
|
||||||
|
http_port: Option<u16>,
|
||||||
|
pg_version: u32,
|
||||||
|
mode: ComputeMode,
|
||||||
|
) -> Result<Arc<Endpoint>> {
|
||||||
|
let pg_port = pg_port.unwrap_or_else(|| self.get_port());
|
||||||
|
let http_port = http_port.unwrap_or_else(|| self.get_port() + 1);
|
||||||
|
let ep = Arc::new(Endpoint {
|
||||||
|
endpoint_id: endpoint_id.to_owned(),
|
||||||
|
pg_address: SocketAddr::new("127.0.0.1".parse().unwrap(), pg_port),
|
||||||
|
http_address: SocketAddr::new("127.0.0.1".parse().unwrap(), http_port),
|
||||||
|
env: self.env.clone(),
|
||||||
|
pageserver: Arc::clone(&self.pageserver),
|
||||||
|
timeline_id,
|
||||||
|
mode,
|
||||||
|
tenant_id,
|
||||||
|
pg_version,
|
||||||
|
});
|
||||||
|
|
||||||
|
ep.create_endpoint_dir()?;
|
||||||
|
std::fs::write(
|
||||||
|
ep.endpoint_path().join("endpoint.json"),
|
||||||
|
serde_json::to_string_pretty(&EndpointConf {
|
||||||
|
endpoint_id: endpoint_id.to_string(),
|
||||||
|
tenant_id,
|
||||||
|
timeline_id,
|
||||||
|
mode,
|
||||||
|
http_port,
|
||||||
|
pg_port,
|
||||||
|
pg_version,
|
||||||
|
})?,
|
||||||
|
)?;
|
||||||
|
std::fs::write(
|
||||||
|
ep.endpoint_path().join("postgresql.conf"),
|
||||||
|
ep.setup_pg_conf()?.to_string(),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
self.endpoints
|
||||||
|
.insert(ep.endpoint_id.clone(), Arc::clone(&ep));
|
||||||
|
|
||||||
|
Ok(ep)
|
||||||
|
}
|
||||||
|
}
|
||||||
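As a usage sketch (not from the diff; `env`, `tenant_id` and `timeline_id` are assumed to be in scope, and the ids and version are made up), creating a primary endpoint through the control plane looks roughly like this:

```rust
use compute_api::spec::ComputeMode;

// Load the control plane state from .neon/endpoints and create an endpoint.
let mut cplane = ComputeControlPlane::load(env.clone())?;
let ep = cplane.new_endpoint(
    "main",
    tenant_id,
    timeline_id,
    None, // pg_port: allocate automatically starting from base_port
    None, // http_port: allocate automatically
    15,   // pg_version
    ComputeMode::Primary,
)?;
println!("created endpoint at {}", ep.endpoint_path().display());
```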
|
|
||||||
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Endpoint {
|
||||||
|
/// used as the directory name
|
||||||
|
endpoint_id: String,
|
||||||
|
pub tenant_id: TenantId,
|
||||||
|
pub timeline_id: TimelineId,
|
||||||
|
pub mode: ComputeMode,
|
||||||
|
|
||||||
|
// port and address of the Postgres server and `compute_ctl`'s HTTP API
|
||||||
|
pub pg_address: SocketAddr,
|
||||||
|
pub http_address: SocketAddr,
|
||||||
|
|
||||||
|
// postgres major version in the format: 14, 15, etc.
|
||||||
|
pg_version: u32,
|
||||||
|
|
||||||
|
// These are not part of the endpoint as such, but the environment
|
||||||
|
// the endpoint runs in.
|
||||||
|
pub env: LocalEnv,
|
||||||
|
pageserver: Arc<PageServerNode>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Endpoint {
|
||||||
|
fn from_dir_entry(
|
||||||
|
entry: std::fs::DirEntry,
|
||||||
|
env: &LocalEnv,
|
||||||
|
pageserver: &Arc<PageServerNode>,
|
||||||
|
) -> Result<Endpoint> {
|
||||||
|
if !entry.file_type()?.is_dir() {
|
||||||
|
anyhow::bail!(
|
||||||
|
"Endpoint::from_dir_entry failed: '{}' is not a directory",
|
||||||
|
entry.path().display()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse data directory name
|
||||||
|
let fname = entry.file_name();
|
||||||
|
let endpoint_id = fname.to_str().unwrap().to_string();
|
||||||
|
|
||||||
|
// Read the endpoint.json file
|
||||||
|
let conf: EndpointConf =
|
||||||
|
serde_json::from_slice(&std::fs::read(entry.path().join("endpoint.json"))?)?;
|
||||||
|
|
||||||
|
Ok(Endpoint {
|
||||||
|
pg_address: SocketAddr::new("127.0.0.1".parse().unwrap(), conf.pg_port),
|
||||||
|
http_address: SocketAddr::new("127.0.0.1".parse().unwrap(), conf.http_port),
|
||||||
|
endpoint_id,
|
||||||
|
env: env.clone(),
|
||||||
|
pageserver: Arc::clone(pageserver),
|
||||||
|
timeline_id: conf.timeline_id,
|
||||||
|
mode: conf.mode,
|
||||||
|
tenant_id: conf.tenant_id,
|
||||||
|
pg_version: conf.pg_version,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_endpoint_dir(&self) -> Result<()> {
|
||||||
|
std::fs::create_dir_all(self.endpoint_path()).with_context(|| {
|
||||||
|
format!(
|
||||||
|
"could not create endpoint directory {}",
|
||||||
|
self.endpoint_path().display()
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate postgresql.conf with default configuration
|
||||||
|
fn setup_pg_conf(&self) -> Result<PostgresConf> {
|
||||||
|
let mut conf = PostgresConf::new();
|
||||||
|
conf.append("max_wal_senders", "10");
|
||||||
|
conf.append("wal_log_hints", "off");
|
||||||
|
conf.append("max_replication_slots", "10");
|
||||||
|
conf.append("hot_standby", "on");
|
||||||
|
conf.append("shared_buffers", "1MB");
|
||||||
|
conf.append("fsync", "off");
|
||||||
|
conf.append("max_connections", "100");
|
||||||
|
conf.append("wal_level", "replica");
|
||||||
|
// wal_sender_timeout is the maximum time to wait for WAL replication.
|
||||||
|
// It also defines how often the walreceiver will send a feedback message to the wal sender.
|
||||||
|
conf.append("wal_sender_timeout", "5s");
|
||||||
|
conf.append("listen_addresses", &self.pg_address.ip().to_string());
|
||||||
|
conf.append("port", &self.pg_address.port().to_string());
|
||||||
|
conf.append("wal_keep_size", "0");
|
||||||
|
// walproposer panics when basebackup is invalid, it is pointless to restart in this case.
|
||||||
|
conf.append("restart_after_crash", "off");
|
||||||
|
|
||||||
|
// Load the 'neon' extension
|
||||||
|
conf.append("shared_preload_libraries", "neon");
|
||||||
|
|
||||||
|
conf.append_line("");
|
||||||
|
// Replication-related configurations, such as WAL sending
|
||||||
|
match &self.mode {
|
||||||
|
ComputeMode::Primary => {
|
||||||
|
// Configure backpressure
|
||||||
|
// - Replication write lag depends on how fast the walreceiver can process incoming WAL.
|
||||||
|
// This lag determines latency of get_page_at_lsn. Speed of applying WAL is about 10MB/sec,
|
||||||
|
// so to avoid expiration of 1 minute timeout, this lag should not be larger than 600MB.
|
||||||
|
// Actually latency should be much smaller (better if < 1sec). But we assume that recently
|
||||||
|
// updated pages are not requested from the pageserver.
|
||||||
|
// - Replication flush lag depends on speed of persisting data by checkpointer (creation of
|
||||||
|
// delta/image layers) and advancing disk_consistent_lsn. Safekeepers are able to
|
||||||
|
// remove/archive WAL only beyond disk_consistent_lsn. Too large a lag can cause long
|
||||||
|
// recovery time (in case of pageserver crash) and disk space overflow at safekeepers.
|
||||||
|
// - Replication apply lag depends on speed of uploading changes to S3 by uploader thread.
|
||||||
|
// To be able to restore database in case of pageserver node crash, safekeeper should not
|
||||||
|
// remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers
|
||||||
|
// (if they are not able to upload WAL to S3).
|
||||||
|
conf.append("max_replication_write_lag", "15MB");
|
||||||
|
conf.append("max_replication_flush_lag", "10GB");
|
||||||
|
|
||||||
|
if !self.env.safekeepers.is_empty() {
|
||||||
|
// Configure Postgres to connect to the safekeepers
|
||||||
|
conf.append("synchronous_standby_names", "walproposer");
|
||||||
|
|
||||||
|
let safekeepers = self
|
||||||
|
.env
|
||||||
|
.safekeepers
|
||||||
|
.iter()
|
||||||
|
.map(|sk| format!("localhost:{}", sk.pg_port))
|
||||||
|
.collect::<Vec<String>>()
|
||||||
|
.join(",");
|
||||||
|
conf.append("neon.safekeepers", &safekeepers);
|
||||||
|
} else {
|
||||||
|
// We only use setup without safekeepers for tests,
|
||||||
|
// and don't care about data durability on pageserver,
|
||||||
|
// so set more relaxed synchronous_commit.
|
||||||
|
conf.append("synchronous_commit", "remote_write");
|
||||||
|
|
||||||
|
// Configure the node to stream WAL directly to the pageserver
|
||||||
|
// This isn't really a supported configuration, but can be useful for
|
||||||
|
// testing.
|
||||||
|
conf.append("synchronous_standby_names", "pageserver");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ComputeMode::Static(lsn) => {
|
||||||
|
conf.append("recovery_target_lsn", &lsn.to_string());
|
||||||
|
}
|
||||||
|
ComputeMode::Replica => {
|
||||||
|
assert!(!self.env.safekeepers.is_empty());
|
||||||
|
|
||||||
|
// TODO: use future host field from safekeeper spec
|
||||||
|
// Pass the list of safekeepers to the replica so that it can connect to any of them,
|
||||||
|
// whichever is available.
|
||||||
|
let sk_ports = self
|
||||||
|
.env
|
||||||
|
.safekeepers
|
||||||
|
.iter()
|
||||||
|
.map(|x| x.pg_port.to_string())
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join(",");
|
||||||
|
let sk_hosts = vec!["localhost"; self.env.safekeepers.len()].join(",");
|
||||||
|
|
||||||
|
let connstr = format!(
|
||||||
|
"host={} port={} options='-c timeline_id={} tenant_id={}' application_name=replica replication=true",
|
||||||
|
sk_hosts,
|
||||||
|
sk_ports,
|
||||||
|
&self.timeline_id.to_string(),
|
||||||
|
&self.tenant_id.to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let slot_name = format!("repl_{}_", self.timeline_id);
|
||||||
|
conf.append("primary_conninfo", connstr.as_str());
|
||||||
|
conf.append("primary_slot_name", slot_name.as_str());
|
||||||
|
conf.append("hot_standby", "on");
|
||||||
|
// prefetching of blocks referenced in WAL doesn't make sense for us
|
||||||
|
// Neon hot standby ignores pages that are not in the shared_buffers
|
||||||
|
if self.pg_version >= 15 {
|
||||||
|
conf.append("recovery_prefetch", "off");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(conf)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn endpoint_path(&self) -> PathBuf {
|
||||||
|
self.env.endpoints_path().join(&self.endpoint_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn pgdata(&self) -> PathBuf {
|
||||||
|
self.endpoint_path().join("pgdata")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn status(&self) -> &str {
|
||||||
|
let timeout = Duration::from_millis(300);
|
||||||
|
let has_pidfile = self.pgdata().join("postmaster.pid").exists();
|
||||||
|
let can_connect = TcpStream::connect_timeout(&self.pg_address, timeout).is_ok();
|
||||||
|
|
||||||
|
match (has_pidfile, can_connect) {
|
||||||
|
(true, true) => "running",
|
||||||
|
(false, false) => "stopped",
|
||||||
|
(true, false) => "crashed",
|
||||||
|
(false, true) => "running, no pidfile",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
|
||||||
|
let pg_ctl_path = self.env.pg_bin_dir(self.pg_version)?.join("pg_ctl");
|
||||||
|
let mut cmd = Command::new(&pg_ctl_path);
|
||||||
|
cmd.args(
|
||||||
|
[
|
||||||
|
&[
|
||||||
|
"-D",
|
||||||
|
self.pgdata().to_str().unwrap(),
|
||||||
|
"-w", //wait till pg_ctl actually does what was asked
|
||||||
|
],
|
||||||
|
args,
|
||||||
|
]
|
||||||
|
.concat(),
|
||||||
|
)
|
||||||
|
.env_clear()
|
||||||
|
.env(
|
||||||
|
"LD_LIBRARY_PATH",
|
||||||
|
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
||||||
|
)
|
||||||
|
.env(
|
||||||
|
"DYLD_LIBRARY_PATH",
|
||||||
|
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Pass authentication token used for the connections to pageserver and safekeepers
|
||||||
|
if let Some(token) = auth_token {
|
||||||
|
cmd.env("NEON_AUTH_TOKEN", token);
|
||||||
|
}
|
||||||
|
|
||||||
|
let pg_ctl = cmd
|
||||||
|
.output()
|
||||||
|
.context(format!("{} failed", pg_ctl_path.display()))?;
|
||||||
|
if !pg_ctl.status.success() {
|
||||||
|
anyhow::bail!(
|
||||||
|
"pg_ctl failed, exit code: {}, stdout: {}, stderr: {}",
|
||||||
|
pg_ctl.status,
|
||||||
|
String::from_utf8_lossy(&pg_ctl.stdout),
|
||||||
|
String::from_utf8_lossy(&pg_ctl.stderr),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn start(&self, auth_token: &Option<String>, safekeepers: Vec<NodeId>) -> Result<()> {
|
||||||
|
if self.status() == "running" {
|
||||||
|
anyhow::bail!("The endpoint is already running");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slurp the endpoints/<endpoint id>/postgresql.conf file into
|
||||||
|
// memory. We will include it in the spec file that we pass to
|
||||||
|
// `compute_ctl`, and `compute_ctl` will write it to the postgresql.conf
|
||||||
|
// in the data directory.
|
||||||
|
let postgresql_conf_path = self.endpoint_path().join("postgresql.conf");
|
||||||
|
let postgresql_conf = match std::fs::read(&postgresql_conf_path) {
|
||||||
|
Ok(content) => String::from_utf8(content)?,
|
||||||
|
Err(e) if e.kind() == std::io::ErrorKind::NotFound => "".to_string(),
|
||||||
|
Err(e) => {
|
||||||
|
return Err(anyhow::Error::new(e).context(format!(
|
||||||
|
"failed to read config file in {}",
|
||||||
|
postgresql_conf_path.to_str().unwrap()
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// We always start the compute node from scratch, so if the Postgres
|
||||||
|
// data dir exists from a previous launch, remove it first.
|
||||||
|
if self.pgdata().exists() {
|
||||||
|
std::fs::remove_dir_all(self.pgdata())?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let pageserver_connstring = {
|
||||||
|
let config = &self.pageserver.pg_connection_config;
|
||||||
|
let (host, port) = (config.host(), config.port());
|
||||||
|
|
||||||
|
// NOTE: avoid spaces in connection string, because it is less error prone if we forward it somewhere.
|
||||||
|
format!("postgresql://no_user@{host}:{port}")
|
||||||
|
};
|
||||||
|
let mut safekeeper_connstrings = Vec::new();
|
||||||
|
if self.mode == ComputeMode::Primary {
|
||||||
|
for sk_id in safekeepers {
|
||||||
|
let sk = self
|
||||||
|
.env
|
||||||
|
.safekeepers
|
||||||
|
.iter()
|
||||||
|
.find(|node| node.id == sk_id)
|
||||||
|
.ok_or_else(|| anyhow!("safekeeper {sk_id} does not exist"))?;
|
||||||
|
safekeeper_connstrings.push(format!("127.0.0.1:{}", sk.pg_port));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create spec file
|
||||||
|
let spec = ComputeSpec {
|
||||||
|
format_version: 1.0,
|
||||||
|
operation_uuid: None,
|
||||||
|
cluster: Cluster {
|
||||||
|
cluster_id: None, // project ID: not used
|
||||||
|
name: None, // project name: not used
|
||||||
|
state: None,
|
||||||
|
roles: vec![],
|
||||||
|
databases: vec![],
|
||||||
|
settings: None,
|
||||||
|
postgresql_conf: Some(postgresql_conf),
|
||||||
|
},
|
||||||
|
delta_operations: None,
|
||||||
|
tenant_id: Some(self.tenant_id),
|
||||||
|
timeline_id: Some(self.timeline_id),
|
||||||
|
mode: self.mode,
|
||||||
|
pageserver_connstring: Some(pageserver_connstring),
|
||||||
|
safekeeper_connstrings,
|
||||||
|
storage_auth_token: auth_token.clone(),
|
||||||
|
};
|
||||||
|
let spec_path = self.endpoint_path().join("spec.json");
|
||||||
|
std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?;
|
||||||
|
|
||||||
|
// Open log file. We'll redirect the stdout and stderr of `compute_ctl` to it.
|
||||||
|
let logfile = std::fs::OpenOptions::new()
|
||||||
|
.create(true)
|
||||||
|
.append(true)
|
||||||
|
.open(self.endpoint_path().join("compute.log"))?;
|
||||||
|
|
||||||
|
// Launch compute_ctl
|
||||||
|
println!("Starting postgres node at '{}'", self.connstr());
|
||||||
|
let mut cmd = Command::new(self.env.neon_distrib_dir.join("compute_ctl"));
|
||||||
|
cmd.args(["--http-port", &self.http_address.port().to_string()])
|
||||||
|
.args(["--pgdata", self.pgdata().to_str().unwrap()])
|
||||||
|
.args(["--connstr", &self.connstr()])
|
||||||
|
.args([
|
||||||
|
"--spec-path",
|
||||||
|
self.endpoint_path().join("spec.json").to_str().unwrap(),
|
||||||
|
])
|
||||||
|
.args([
|
||||||
|
"--pgbin",
|
||||||
|
self.env
|
||||||
|
.pg_bin_dir(self.pg_version)?
|
||||||
|
.join("postgres")
|
||||||
|
.to_str()
|
||||||
|
.unwrap(),
|
||||||
|
])
|
||||||
|
.stdin(std::process::Stdio::null())
|
||||||
|
.stderr(logfile.try_clone()?)
|
||||||
|
.stdout(logfile);
|
||||||
|
let _child = cmd.spawn()?;
|
||||||
|
|
||||||
|
// Wait for it to start
|
||||||
|
let mut attempt = 0;
|
||||||
|
const ATTEMPT_INTERVAL: Duration = Duration::from_millis(100);
|
||||||
|
const MAX_ATTEMPTS: u32 = 10 * 30; // Wait up to 30 s
|
||||||
|
loop {
|
||||||
|
attempt += 1;
|
||||||
|
match self.get_status() {
|
||||||
|
Ok(state) => {
|
||||||
|
match state.status {
|
||||||
|
ComputeStatus::Init => {
|
||||||
|
if attempt == MAX_ATTEMPTS {
|
||||||
|
bail!("compute startup timed out; still in Init state");
|
||||||
|
}
|
||||||
|
// keep retrying
|
||||||
|
}
|
||||||
|
ComputeStatus::Running => {
|
||||||
|
// All good!
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ComputeStatus::Failed => {
|
||||||
|
bail!(
|
||||||
|
"compute startup failed: {}",
|
||||||
|
state
|
||||||
|
.error
|
||||||
|
.as_deref()
|
||||||
|
.unwrap_or("<no error from compute_ctl>")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
ComputeStatus::Empty
|
||||||
|
| ComputeStatus::ConfigurationPending
|
||||||
|
| ComputeStatus::Configuration => {
|
||||||
|
bail!("unexpected compute status: {:?}", state.status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
if attempt == MAX_ATTEMPTS {
|
||||||
|
return Err(e).context(
|
||||||
|
"timed out waiting to connect to compute_ctl HTTP; last error: {e}",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
std::thread::sleep(ATTEMPT_INTERVAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
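A hedged sketch of calling `start` from a command handler (not part of the commit; `env` and the endpoint id are assumptions, and safekeeper 1 is just an example):

```rust
use utils::id::NodeId;

// Look up an existing endpoint and start Postgres via compute_ctl,
// streaming WAL to safekeeper 1 only, with no auth token.
let cplane = ComputeControlPlane::load(env.clone())?;
let ep = cplane
    .endpoints
    .get("main")
    .ok_or_else(|| anyhow::anyhow!("endpoint 'main' not found"))?;
ep.start(&None, vec![NodeId(1)])?;
```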
|
|
||||||
|
// Call the /status HTTP API
|
||||||
|
pub fn get_status(&self) -> Result<ComputeState> {
|
||||||
|
let client = reqwest::blocking::Client::new();
|
||||||
|
|
||||||
|
let response = client
|
||||||
|
.request(
|
||||||
|
reqwest::Method::GET,
|
||||||
|
format!(
|
||||||
|
"http://{}:{}/status",
|
||||||
|
self.http_address.ip(),
|
||||||
|
self.http_address.port()
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.send()?;
|
||||||
|
|
||||||
|
// Interpret the response
|
||||||
|
let status = response.status();
|
||||||
|
if !(status.is_client_error() || status.is_server_error()) {
|
||||||
|
Ok(response.json()?)
|
||||||
|
} else {
|
||||||
|
// reqwest does not export its error construction utility functions, so let's craft the message ourselves
|
||||||
|
let url = response.url().to_owned();
|
||||||
|
let msg = match response.text() {
|
||||||
|
Ok(err_body) => format!("Error: {}", err_body),
|
||||||
|
Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
|
||||||
|
};
|
||||||
|
Err(anyhow::anyhow!(msg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn stop(&self, destroy: bool) -> Result<()> {
|
||||||
|
// If we are going to destroy data directory,
|
||||||
|
// use immediate shutdown mode, otherwise,
|
||||||
|
// shutdown gracefully to leave the data directory sane.
|
||||||
|
//
|
||||||
|
// Postgres is always started from scratch, so stop
|
||||||
|
// without destroy only used for testing and debugging.
|
||||||
|
//
|
||||||
|
if destroy {
|
||||||
|
self.pg_ctl(&["-m", "immediate", "stop"], &None)?;
|
||||||
|
println!(
|
||||||
|
"Destroying postgres data directory '{}'",
|
||||||
|
self.pgdata().to_str().unwrap()
|
||||||
|
);
|
||||||
|
std::fs::remove_dir_all(self.endpoint_path())?;
|
||||||
|
} else {
|
||||||
|
self.pg_ctl(&["stop"], &None)?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn connstr(&self) -> String {
|
||||||
|
format!(
|
||||||
|
"postgresql://{}@{}:{}/{}",
|
||||||
|
"cloud_admin",
|
||||||
|
self.pg_address.ip(),
|
||||||
|
self.pg_address.port(),
|
||||||
|
"postgres"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -9,7 +9,7 @@

 mod background_process;
 pub mod broker;
-pub mod compute;
+pub mod endpoint;
 pub mod local_env;
 pub mod pageserver;
 pub mod postgresql_conf;
@@ -5,6 +5,7 @@

 use anyhow::{bail, ensure, Context};

+use postgres_backend::AuthType;
 use reqwest::Url;
 use serde::{Deserialize, Serialize};
 use serde_with::{serde_as, DisplayFromStr};
@@ -17,14 +18,13 @@ use std::net::SocketAddr;
 use std::path::{Path, PathBuf};
 use std::process::{Command, Stdio};
 use utils::{
-    auth::{encode_from_key_file, Claims, Scope},
+    auth::{encode_from_key_file, Claims},
     id::{NodeId, TenantId, TenantTimelineId, TimelineId},
-    postgres_backend::AuthType,
 };

 use crate::safekeeper::SafekeeperNode;

-pub const DEFAULT_PG_VERSION: u32 = 14;
+pub const DEFAULT_PG_VERSION: u32 = 15;

 //
 // This data structures represents neon_local CLI config
@@ -37,7 +37,7 @@ pub const DEFAULT_PG_VERSION: u32 = 14;
 #[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
 pub struct LocalEnv {
     // Base directory for all the nodes (the pageserver, safekeepers and
-    // compute nodes).
+    // compute endpoints).
     //
     // This is not stored in the config file. Rather, this is the path where the
     // config file itself is. It is read from the NEON_REPO_DIR env variable or
@@ -110,15 +110,14 @@ impl NeonBroker {
 pub struct PageServerConf {
     // node id
     pub id: NodeId,

     // Pageserver connection settings
     pub listen_pg_addr: String,
     pub listen_http_addr: String,

-    // used to determine which auth type is used
-    pub auth_type: AuthType,
-    // jwt auth token used for communication with pageserver
-    pub auth_token: String,
+    // auth type used for the PG and HTTP ports
+    pub pg_auth_type: AuthType,
+    pub http_auth_type: AuthType,
 }

 impl Default for PageServerConf {
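Since the single `auth_type` is now split per port, a configuration that enables JWT on both the libpq and HTTP listeners sets the two fields independently. A minimal sketch (not from the diff; it relies on the `Default` impl shown below):

```rust
// Enable NeonJWT on both pageserver ports; all other fields keep their defaults.
let ps_conf = PageServerConf {
    pg_auth_type: AuthType::NeonJWT,
    http_auth_type: AuthType::NeonJWT,
    ..Default::default()
};
```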
@@ -127,8 +126,8 @@ impl Default for PageServerConf {
             id: NodeId(0),
             listen_pg_addr: String::new(),
             listen_http_addr: String::new(),
-            auth_type: AuthType::Trust,
-            auth_token: String::new(),
+            pg_auth_type: AuthType::Trust,
+            http_auth_type: AuthType::Trust,
         }
     }
 }
@@ -201,14 +200,8 @@ impl LocalEnv {
         self.neon_distrib_dir.join("storage_broker")
     }

-    pub fn pg_data_dirs_path(&self) -> PathBuf {
-        self.base_data_dir.join("pgdatadirs").join("tenants")
-    }
-
-    pub fn pg_data_dir(&self, tenant_id: &TenantId, branch_name: &str) -> PathBuf {
-        self.pg_data_dirs_path()
-            .join(tenant_id.to_string())
-            .join(branch_name)
+    pub fn endpoints_path(&self) -> PathBuf {
+        self.base_data_dir.join("endpoints")
     }

     // TODO: move pageserver files into ./pageserver
@@ -401,49 +394,34 @@ impl LocalEnv {
         fs::create_dir(base_path)?;

-        // generate keys for jwt
-        // openssl genrsa -out private_key.pem 2048
-        let private_key_path;
+        // Generate keypair for JWT.
+        //
+        // The keypair is only needed if authentication is enabled in any of the
+        // components. For convenience, we generate the keypair even if authentication
+        // is not enabled, so that you can easily enable it after the initialization
+        // step. However, if the key generation fails, we treat it as non-fatal if
+        // authentication was not enabled.
         if self.private_key_path == PathBuf::new() {
-            private_key_path = base_path.join("auth_private_key.pem");
-            let keygen_output = Command::new("openssl")
-                .arg("genrsa")
-                .args(["-out", private_key_path.to_str().unwrap()])
-                .arg("2048")
-                .stdout(Stdio::null())
-                .output()
-                .context("failed to generate auth private key")?;
-            if !keygen_output.status.success() {
-                bail!(
-                    "openssl failed: '{}'",
-                    String::from_utf8_lossy(&keygen_output.stderr)
-                );
-            }
-            self.private_key_path = PathBuf::from("auth_private_key.pem");
-
-            let public_key_path = base_path.join("auth_public_key.pem");
-            // openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
-            let keygen_output = Command::new("openssl")
-                .arg("rsa")
-                .args(["-in", private_key_path.to_str().unwrap()])
-                .arg("-pubout")
-                .args(["-outform", "PEM"])
-                .args(["-out", public_key_path.to_str().unwrap()])
-                .stdout(Stdio::null())
-                .output()
-                .context("failed to generate auth private key")?;
-            if !keygen_output.status.success() {
-                bail!(
-                    "openssl failed: '{}'",
-                    String::from_utf8_lossy(&keygen_output.stderr)
-                );
-            }
+            match generate_auth_keys(
+                base_path.join("auth_private_key.pem").as_path(),
+                base_path.join("auth_public_key.pem").as_path(),
+            ) {
+                Ok(()) => {
+                    self.private_key_path = PathBuf::from("auth_private_key.pem");
+                }
+                Err(e) => {
+                    if !self.auth_keys_needed() {
+                        eprintln!("Could not generate keypair for JWT authentication: {e}");
+                        eprintln!("Continuing anyway because authentication was not enabled");
+                        self.private_key_path = PathBuf::from("auth_private_key.pem");
+                    } else {
+                        return Err(e);
+                    }
+                }
+            }
         }

-        self.pageserver.auth_token =
-            self.generate_auth_token(&Claims::new(None, Scope::PageServerApi))?;
-
-        fs::create_dir_all(self.pg_data_dirs_path())?;
+        fs::create_dir_all(self.endpoints_path())?;

         for safekeeper in &self.safekeepers {
             fs::create_dir_all(SafekeeperNode::datadir_path_by_id(self, safekeeper.id))?;
@@ -451,6 +429,12 @@ impl LocalEnv {
         self.persist_config(base_path)
     }

+    fn auth_keys_needed(&self) -> bool {
+        self.pageserver.pg_auth_type == AuthType::NeonJWT
+            || self.pageserver.http_auth_type == AuthType::NeonJWT
+            || self.safekeepers.iter().any(|sk| sk.auth_enabled)
+    }
 }

 fn base_path() -> PathBuf {
@@ -460,6 +444,43 @@ fn base_path() -> PathBuf {
     }
 }

+/// Generate a public/private key pair for JWT authentication
+fn generate_auth_keys(private_key_path: &Path, public_key_path: &Path) -> anyhow::Result<()> {
+    // Generate the key pair
+    //
+    // openssl genpkey -algorithm ed25519 -out auth_private_key.pem
+    let keygen_output = Command::new("openssl")
+        .arg("genpkey")
+        .args(["-algorithm", "ed25519"])
+        .args(["-out", private_key_path.to_str().unwrap()])
+        .stdout(Stdio::null())
+        .output()
+        .context("failed to generate auth private key")?;
+    if !keygen_output.status.success() {
+        bail!(
+            "openssl failed: '{}'",
+            String::from_utf8_lossy(&keygen_output.stderr)
+        );
+    }
+
+    // Extract the public key from the private key file
+    //
+    // openssl pkey -in auth_private_key.pem -pubout -out auth_public_key.pem
+    let keygen_output = Command::new("openssl")
+        .arg("pkey")
+        .args(["-in", private_key_path.to_str().unwrap()])
+        .arg("-pubout")
+        .args(["-out", public_key_path.to_str().unwrap()])
+        .output()
+        .context("failed to extract public key from private key")?;
+    if !keygen_output.status.success() {
+        bail!(
+            "openssl failed: '{}'",
+            String::from_utf8_lossy(&keygen_output.stderr)
+        );
+    }
+    Ok(())
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
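The new helper can also be called directly. A hedged sketch of such a call (the paths are placeholders, and the surrounding function is assumed to return `anyhow::Result`):

```rust
use std::path::Path;

// Produce an Ed25519 keypair for JWT auth under the repo directory.
generate_auth_keys(
    Path::new(".neon/auth_private_key.pem"),
    Path::new(".neon/auth_public_key.pem"),
)?;
```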
Some files were not shown because too many files have changed in this diff.