Mirror of https://github.com/neondatabase/neon.git (synced 2026-03-13 13:20:38 +00:00)

Compare commits: conrad/pro...release-71 (595 commits)
@@ -46,9 +46,6 @@ workspace-members = [
 "utils",
 "wal_craft",
 "walproposer",
-"postgres-protocol2",
-"postgres-types2",
-"tokio-postgres2",
 ]

 # Write out exact versions rather than a semver range. (Defaults to false.)
@@ -7,10 +7,6 @@ inputs:
 type: boolean
 required: false
 default: false
-aws_oicd_role_arn:
-description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
-required: false
-default: ''

 outputs:
 base-url:
@@ -43,8 +39,7 @@ runs:
 PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
 if [ "${PR_NUMBER}" != "null" ]; then
 BRANCH_OR_PR=pr-${PR_NUMBER}
-elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || \
-[ "${GITHUB_REF_NAME}" = "release-proxy" ] || [ "${GITHUB_REF_NAME}" = "release-compute" ]; then
+elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || [ "${GITHUB_REF_NAME}" = "release-proxy" ]; then
 # Shortcut for special branches
 BRANCH_OR_PR=${GITHUB_REF_NAME}
 else
@@ -84,14 +79,6 @@ runs:
 ALLURE_VERSION: 2.27.0
 ALLURE_ZIP_SHA256: b071858fb2fa542c65d8f152c5c40d26267b2dfb74df1f1608a589ecca38e777

-- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
-if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ inputs.aws_oicd_role_arn }}
-role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
-
 # Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this
 - name: Acquire lock
 shell: bash -euxo pipefail {0}
15 .github/actions/allure-report-store/action.yml vendored
@@ -8,10 +8,6 @@ inputs:
 unique-key:
 description: 'string to distinguish different results in the same run'
 required: true
-aws_oicd_role_arn:
-description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
-required: false
-default: ''

 runs:
 using: "composite"
@@ -23,8 +19,7 @@ runs:
 PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
 if [ "${PR_NUMBER}" != "null" ]; then
 BRANCH_OR_PR=pr-${PR_NUMBER}
-elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || \
-[ "${GITHUB_REF_NAME}" = "release-proxy" ] || [ "${GITHUB_REF_NAME}" = "release-compute" ]; then
+elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || [ "${GITHUB_REF_NAME}" = "release-proxy" ]; then
 # Shortcut for special branches
 BRANCH_OR_PR=${GITHUB_REF_NAME}
 else
@@ -36,14 +31,6 @@ runs:
 env:
 REPORT_DIR: ${{ inputs.report-dir }}

-- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
-if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ inputs.aws_oicd_role_arn }}
-role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
-
 - name: Upload test results
 shell: bash -euxo pipefail {0}
 run: |
28 .github/actions/run-python-test-set/action.yml vendored
@@ -36,8 +36,8 @@ inputs:
 description: 'Region name for real s3 tests'
 required: false
 default: ''
-rerun_failed:
+rerun_flaky:
-description: 'Whether to rerun failed tests'
+description: 'Whether to rerun flaky tests'
 required: false
 default: 'false'
 pg_version:
@@ -48,10 +48,6 @@ inputs:
 description: 'benchmark durations JSON'
 required: false
 default: '{}'
-aws_oicd_role_arn:
-description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
-required: false
-default: ''

 runs:
 using: "composite"
@@ -108,7 +104,7 @@ runs:
 COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg${{ inputs.pg_version }}
 ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'backward compatibility breakage')
 ALLOW_FORWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'forward compatibility breakage')
-RERUN_FAILED: ${{ inputs.rerun_failed }}
+RERUN_FLAKY: ${{ inputs.rerun_flaky }}
 PG_VERSION: ${{ inputs.pg_version }}
 shell: bash -euxo pipefail {0}
 run: |
@@ -154,8 +150,15 @@ runs:
 EXTRA_PARAMS="--out-dir $PERF_REPORT_DIR $EXTRA_PARAMS"
 fi

-if [ "${RERUN_FAILED}" == "true" ]; then
+if [ "${RERUN_FLAKY}" == "true" ]; then
-EXTRA_PARAMS="--reruns 2 $EXTRA_PARAMS"
+mkdir -p $TEST_OUTPUT
+poetry run ./scripts/flaky_tests.py "${TEST_RESULT_CONNSTR}" \
+--days 7 \
+--output "$TEST_OUTPUT/flaky.json" \
+--pg-version "${DEFAULT_PG_VERSION}" \
+--build-type "${BUILD_TYPE}"
+
+EXTRA_PARAMS="--flaky-tests-json $TEST_OUTPUT/flaky.json $EXTRA_PARAMS"
 fi

 # We use pytest-split plugin to run benchmarks in parallel on different CI runners
@@ -219,13 +222,6 @@ runs:
 # (for example if we didn't run the test for non build-and-test workflow)
 skip-if-does-not-exist: true

-- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
-if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ inputs.aws_oicd_role_arn }}
-role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
 - name: Upload test results
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-store
11 .github/workflows/_build-and-test-locally.yml vendored
@@ -19,8 +19,8 @@ on:
 description: 'debug or release'
 required: true
 type: string
-test-cfg:
+pg-versions:
-description: 'a json object of postgres versions and lfc states to run regression tests on'
+description: 'a json array of postgres versions to run regression tests on'
 required: true
 type: string

@@ -276,14 +276,14 @@ jobs:
 options: --init --shm-size=512mb --ulimit memlock=67108864:67108864
 strategy:
 fail-fast: false
-matrix: ${{ fromJSON(format('{{"include":{0}}}', inputs.test-cfg)) }}
+matrix:
+pg_version: ${{ fromJson(inputs.pg-versions) }}
 steps:
 - uses: actions/checkout@v4
 with:
 submodules: true

 - name: Pytest regression tests
-continue-on-error: ${{ matrix.lfc_state == 'with-lfc' }}
 uses: ./.github/actions/run-python-test-set
 timeout-minutes: 60
 with:
@@ -293,14 +293,13 @@ jobs:
 run_with_real_s3: true
 real_s3_bucket: neon-github-ci-tests
 real_s3_region: eu-central-1
-rerun_failed: true
+rerun_flaky: true
 pg_version: ${{ matrix.pg_version }}
 env:
 TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
 CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
 BUILD_TAG: ${{ inputs.build-tag }}
 PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
-USE_LFC: ${{ matrix.lfc_state == 'with-lfc' && 'true' || 'false' }}

 # Temporary disable this step until we figure out why it's so flaky
 # Ref https://github.com/neondatabase/neon/issues/4540
79 .github/workflows/_create-release-pr.yml vendored
@@ -1,79 +0,0 @@
-name: Create Release PR
-
-on:
-workflow_call:
-inputs:
-component-name:
-description: 'Component name'
-required: true
-type: string
-release-branch:
-description: 'Release branch'
-required: true
-type: string
-secrets:
-ci-access-token:
-description: 'CI access token'
-required: true
-
-defaults:
-run:
-shell: bash -euo pipefail {0}
-
-jobs:
-create-release-branch:
-runs-on: ubuntu-22.04
-
-permissions:
-contents: write # for `git push`
-
-steps:
-- uses: actions/checkout@v4
-with:
-ref: main
-
-- name: Set variables
-id: vars
-env:
-COMPONENT_NAME: ${{ inputs.component-name }}
-RELEASE_BRANCH: ${{ inputs.release-branch }}
-run: |
-today=$(date +'%Y-%m-%d')
-echo "title=${COMPONENT_NAME} release ${today}" | tee -a ${GITHUB_OUTPUT}
-echo "rc-branch=rc/${RELEASE_BRANCH}/${today}" | tee -a ${GITHUB_OUTPUT}
-
-- name: Configure git
-run: |
-git config user.name "github-actions[bot]"
-git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
-
-- name: Create RC branch
-env:
-RC_BRANCH: ${{ steps.vars.outputs.rc-branch }}
-TITLE: ${{ steps.vars.outputs.title }}
-run: |
-git checkout -b "${RC_BRANCH}"
-
-# create an empty commit to distinguish workflow runs
-# from other possible releases from the same commit
-git commit --allow-empty -m "${TITLE}"
-
-git push origin "${RC_BRANCH}"
-
-- name: Create a PR into ${{ inputs.release-branch }}
-env:
-GH_TOKEN: ${{ secrets.ci-access-token }}
-RC_BRANCH: ${{ steps.vars.outputs.rc-branch }}
-RELEASE_BRANCH: ${{ inputs.release-branch }}
-TITLE: ${{ steps.vars.outputs.title }}
-run: |
-cat << EOF > body.md
-## ${TITLE}
-
-**Please merge this Pull Request using 'Create a merge commit' button**
-EOF
-
-gh pr create --title "${TITLE}" \
---body-file "body.md" \
---head "${RC_BRANCH}" \
---base "${RELEASE_BRANCH}"
54 .github/workflows/benchmarking.yml vendored
@@ -122,7 +122,6 @@ jobs:
 run_in_parallel: false
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 # Set --sparse-ordering option of pytest-order plugin
 # to ensure tests are running in order of appears in the file.
 # It's important for test_perf_pgbench.py::test_pgbench_remote_* tests
@@ -134,7 +133,6 @@ jobs:
 --ignore test_runner/performance/test_perf_pgvector_queries.py
 --ignore test_runner/performance/test_logical_replication.py
 --ignore test_runner/performance/test_physical_replication.py
---ignore test_runner/performance/test_perf_ingest_using_pgcopydb.py
 env:
 BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -151,14 +149,12 @@ jobs:
 id: create-allure-report
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
-with:
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
 uses: slackapi/slack-github-action@v1
 with:
-channel-id: "C06KHQVQ7U3" # on-call-qa-staging-stream
+channel-id: "C033QLM5P7D" # dev-staging-stream
 slack-message: |
 Periodic perf testing: ${{ job.status }}
 <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>
@@ -214,7 +210,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 5400
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
 PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -231,7 +226,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 5400
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
 PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -243,13 +237,11 @@ jobs:
 uses: ./.github/actions/allure-report-generate
 with:
 store-test-results-into-db: true
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

-# Post both success and failure to the Slack channel
 - name: Post to a Slack channel
-if: ${{ github.event.schedule && !cancelled() }}
+if: ${{ github.event.schedule && failure() }}
 uses: slackapi/slack-github-action@v1
 with:
 channel-id: "C06T9AMNDQQ" # on-call-compute-staging-stream
@@ -452,7 +444,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -467,7 +458,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -482,7 +472,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -499,14 +488,12 @@ jobs:
 id: create-allure-report
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
-with:
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
 uses: slackapi/slack-github-action@v1
 with:
-channel-id: "C06KHQVQ7U3" # on-call-qa-staging-stream
+channel-id: "C033QLM5P7D" # dev-staging-stream
 slack-message: |
 Periodic perf testing on ${{ matrix.platform }}: ${{ job.status }}
 <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>
@@ -541,7 +528,7 @@ jobs:

 runs-on: ${{ matrix.RUNNER }}
 container:
-image: neondatabase/build-tools:pinned-bookworm
+image: neondatabase/build-tools:pinned
 credentials:
 username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
 password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
@@ -558,12 +545,12 @@ jobs:
 arch=$(uname -m | sed 's/x86_64/amd64/g' | sed 's/aarch64/arm64/g')

 cd /home/nonroot
-wget -q "https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-17/libpq5_17.2-1.pgdg120+1_${arch}.deb"
+wget -q "https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-17/libpq5_17.0-1.pgdg110+1_${arch}.deb"
-wget -q "https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-16/postgresql-client-16_16.6-1.pgdg120+1_${arch}.deb"
+wget -q "https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-16/postgresql-client-16_16.4-1.pgdg110+2_${arch}.deb"
-wget -q "https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-16/postgresql-16_16.6-1.pgdg120+1_${arch}.deb"
+wget -q "https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-16/postgresql-16_16.4-1.pgdg110+2_${arch}.deb"
-dpkg -x libpq5_17.2-1.pgdg120+1_${arch}.deb pg
+dpkg -x libpq5_17.0-1.pgdg110+1_${arch}.deb pg
-dpkg -x postgresql-16_16.6-1.pgdg120+1_${arch}.deb pg
+dpkg -x postgresql-16_16.4-1.pgdg110+2_${arch}.deb pg
-dpkg -x postgresql-client-16_16.6-1.pgdg120+1_${arch}.deb pg
+dpkg -x postgresql-client-16_16.4-1.pgdg110+2_${arch}.deb pg

 mkdir -p /tmp/neon/pg_install/v16/bin
 ln -s /home/nonroot/pg/usr/lib/postgresql/16/bin/pgbench /tmp/neon/pg_install/v16/bin/pgbench
@@ -611,7 +598,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
 PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -626,7 +612,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -636,14 +621,12 @@ jobs:
 id: create-allure-report
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
-with:
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
 uses: slackapi/slack-github-action@v1
 with:
-channel-id: "C06KHQVQ7U3" # on-call-qa-staging-stream
+channel-id: "C033QLM5P7D" # dev-staging-stream
 slack-message: |
 Periodic perf testing on ${{ env.PLATFORM }}: ${{ job.status }}
 <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>
@@ -739,7 +722,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 43200 -k test_clickbench
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
 PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -752,14 +734,12 @@ jobs:
 id: create-allure-report
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
-with:
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
 uses: slackapi/slack-github-action@v1
 with:
-channel-id: "C06KHQVQ7U3" # on-call-qa-staging-stream
+channel-id: "C033QLM5P7D" # dev-staging-stream
 slack-message: |
 Periodic OLAP perf testing on ${{ matrix.platform }}: ${{ job.status }}
 <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>
@@ -856,7 +836,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_tpch
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
 PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -867,14 +846,12 @@ jobs:
 id: create-allure-report
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
-with:
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
 uses: slackapi/slack-github-action@v1
 with:
-channel-id: "C06KHQVQ7U3" # on-call-qa-staging-stream
+channel-id: "C033QLM5P7D" # dev-staging-stream
 slack-message: |
 Periodic TPC-H perf testing on ${{ matrix.platform }}: ${{ job.status }}
 <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>
@@ -957,7 +934,6 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
 PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -967,14 +943,12 @@ jobs:
 id: create-allure-report
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
-with:
-aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
 uses: slackapi/slack-github-action@v1
 with:
-channel-id: "C06KHQVQ7U3" # on-call-qa-staging-stream
+channel-id: "C033QLM5P7D" # dev-staging-stream
 slack-message: |
 Periodic TPC-H perf testing on ${{ matrix.platform }}: ${{ job.status }}
 <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>
108 .github/workflows/build-build-tools-image.yml vendored
@@ -3,23 +3,17 @@ name: Build build-tools image
 on:
 workflow_call:
 inputs:
-archs:
+image-tag:
-description: "Json array of architectures to build"
+description: "build-tools image tag"
-# Default values are set in `check-image` job, `set-variables` step
+required: true
 type: string
-required: false
-debians:
-description: "Json array of Debian versions to build"
-# Default values are set in `check-image` job, `set-variables` step
-type: string
-required: false
 outputs:
 image-tag:
 description: "build-tools tag"
-value: ${{ jobs.check-image.outputs.tag }}
+value: ${{ inputs.image-tag }}
 image:
 description: "build-tools image"
-value: neondatabase/build-tools:${{ jobs.check-image.outputs.tag }}
+value: neondatabase/build-tools:${{ inputs.image-tag }}

 defaults:
 run:
@@ -41,48 +35,7 @@ permissions: {}

 jobs:
 check-image:
-runs-on: ubuntu-22.04
+uses: ./.github/workflows/check-build-tools-image.yml
-outputs:
-archs: ${{ steps.set-variables.outputs.archs }}
-debians: ${{ steps.set-variables.outputs.debians }}
-tag: ${{ steps.set-variables.outputs.image-tag }}
-everything: ${{ steps.set-more-variables.outputs.everything }}
-found: ${{ steps.set-more-variables.outputs.found }}
-
-steps:
-- uses: actions/checkout@v4
-
-- name: Set variables
-id: set-variables
-env:
-ARCHS: ${{ inputs.archs || '["x64","arm64"]' }}
-DEBIANS: ${{ inputs.debians || '["bullseye","bookworm"]' }}
-IMAGE_TAG: |
-${{ hashFiles('build-tools.Dockerfile',
-'.github/workflows/build-build-tools-image.yml') }}
-run: |
-echo "archs=${ARCHS}" | tee -a ${GITHUB_OUTPUT}
-echo "debians=${DEBIANS}" | tee -a ${GITHUB_OUTPUT}
-echo "image-tag=${IMAGE_TAG}" | tee -a ${GITHUB_OUTPUT}
-
-- name: Set more variables
-id: set-more-variables
-env:
-IMAGE_TAG: ${{ steps.set-variables.outputs.image-tag }}
-EVERYTHING: |
-${{ contains(fromJson(steps.set-variables.outputs.archs), 'x64') &&
-contains(fromJson(steps.set-variables.outputs.archs), 'arm64') &&
-contains(fromJson(steps.set-variables.outputs.debians), 'bullseye') &&
-contains(fromJson(steps.set-variables.outputs.debians), 'bookworm') }}
-run: |
-if docker manifest inspect neondatabase/build-tools:${IMAGE_TAG}; then
-found=true
-else
-found=false
-fi
-
-echo "everything=${EVERYTHING}" | tee -a ${GITHUB_OUTPUT}
-echo "found=${found}" | tee -a ${GITHUB_OUTPUT}

 build-image:
 needs: [ check-image ]
@@ -90,12 +43,25 @@ jobs:

 strategy:
 matrix:
-arch: ${{ fromJson(needs.check-image.outputs.archs) }}
+debian-version: [ bullseye, bookworm ]
-debian: ${{ fromJson(needs.check-image.outputs.debians) }}
+arch: [ x64, arm64 ]

 runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', matrix.arch == 'arm64' && 'large-arm64' || 'large')) }}

+env:
+IMAGE_TAG: ${{ inputs.image-tag }}
+
 steps:
+- name: Check `input.tag` is correct
+env:
+INPUTS_IMAGE_TAG: ${{ inputs.image-tag }}
+CHECK_IMAGE_TAG : ${{ needs.check-image.outputs.image-tag }}
+run: |
+if [ "${INPUTS_IMAGE_TAG}" != "${CHECK_IMAGE_TAG}" ]; then
+echo "'inputs.image-tag' (${INPUTS_IMAGE_TAG}) does not match the tag of the latest build-tools image 'inputs.image-tag' (${CHECK_IMAGE_TAG})"
+exit 1
+fi
+
 - uses: actions/checkout@v4

 - uses: neondatabase/dev-actions/set-docker-config-dir@6094485bf440001c94a94a3f9e221e81ff6b6193
@@ -122,14 +88,14 @@ jobs:
 push: true
 pull: true
 build-args: |
-DEBIAN_VERSION=${{ matrix.debian }}
+DEBIAN_VERSION=${{ matrix.debian-version }}
-cache-from: type=registry,ref=cache.neon.build/build-tools:cache-${{ matrix.debian }}-${{ matrix.arch }}
+cache-from: type=registry,ref=cache.neon.build/build-tools:cache-${{ matrix.debian-version }}-${{ matrix.arch }}
-cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=cache.neon.build/build-tools:cache-{0}-{1},mode=max', matrix.debian, matrix.arch) || '' }}
+cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=cache.neon.build/build-tools:cache-{0}-{1},mode=max', matrix.debian-version, matrix.arch) || '' }}
 tags: |
-neondatabase/build-tools:${{ needs.check-image.outputs.tag }}-${{ matrix.debian }}-${{ matrix.arch }}
+neondatabase/build-tools:${{ inputs.image-tag }}-${{ matrix.debian-version }}-${{ matrix.arch }}

 merge-images:
-needs: [ check-image, build-image ]
+needs: [ build-image ]
 runs-on: ubuntu-22.04

 steps:
@@ -140,22 +106,16 @@ jobs:

 - name: Create multi-arch image
 env:
-DEFAULT_DEBIAN_VERSION: bookworm
+DEFAULT_DEBIAN_VERSION: bullseye
-ARCHS: ${{ join(fromJson(needs.check-image.outputs.archs), ' ') }}
+IMAGE_TAG: ${{ inputs.image-tag }}
-DEBIANS: ${{ join(fromJson(needs.check-image.outputs.debians), ' ') }}
-EVERYTHING: ${{ needs.check-image.outputs.everything }}
-IMAGE_TAG: ${{ needs.check-image.outputs.tag }}
 run: |
-for debian in ${DEBIANS}; do
+for debian_version in bullseye bookworm; do
-tags=("-t" "neondatabase/build-tools:${IMAGE_TAG}-${debian}")
+tags=("-t" "neondatabase/build-tools:${IMAGE_TAG}-${debian_version}")
+if [ "${debian_version}" == "${DEFAULT_DEBIAN_VERSION}" ]; then
-if [ "${EVERYTHING}" == "true" ] && [ "${debian}" == "${DEFAULT_DEBIAN_VERSION}" ]; then
 tags+=("-t" "neondatabase/build-tools:${IMAGE_TAG}")
 fi

-for arch in ${ARCHS}; do
+docker buildx imagetools create "${tags[@]}" \
-tags+=("neondatabase/build-tools:${IMAGE_TAG}-${debian}-${arch}")
+neondatabase/build-tools:${IMAGE_TAG}-${debian_version}-x64 \
-done
+neondatabase/build-tools:${IMAGE_TAG}-${debian_version}-arm64
-
-docker buildx imagetools create "${tags[@]}"
 done
55 .github/workflows/build_and_test.yml vendored
@@ -6,7 +6,6 @@ on:
 - main
 - release
 - release-proxy
-- release-compute
 pull_request:

 defaults:
@@ -71,18 +70,22 @@ jobs:
 echo "tag=release-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
 elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
 echo "tag=release-proxy-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
-elif [[ "$GITHUB_REF_NAME" == "release-compute" ]]; then
-echo "tag=release-compute-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
 else
-echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release', 'release-proxy', 'release-compute'"
+echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
 echo "tag=$GITHUB_RUN_ID" >> $GITHUB_OUTPUT
 fi
 shell: bash
 id: build-tag

-build-build-tools-image:
+check-build-tools-image:
 needs: [ check-permissions ]
+uses: ./.github/workflows/check-build-tools-image.yml
+
+build-build-tools-image:
+needs: [ check-build-tools-image ]
 uses: ./.github/workflows/build-build-tools-image.yml
+with:
+image-tag: ${{ needs.check-build-tools-image.outputs.image-tag }}
 secrets: inherit

 check-codestyle-python:
@@ -256,14 +259,7 @@ jobs:
 build-tag: ${{ needs.tag.outputs.build-tag }}
 build-type: ${{ matrix.build-type }}
 # Run tests on all Postgres versions in release builds and only on the latest version in debug builds
-# run without LFC on v17 release only
+pg-versions: ${{ matrix.build-type == 'release' && '["v14", "v15", "v16", "v17"]' || '["v17"]' }}
-test-cfg: |
-${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "without-lfc"},
-{"pg_version":"v15", "lfc_state": "without-lfc"},
-{"pg_version":"v16", "lfc_state": "without-lfc"},
-{"pg_version":"v17", "lfc_state": "without-lfc"},
-{"pg_version":"v17", "lfc_state": "with-lfc"}]'
-|| '[{"pg_version":"v17", "lfc_state": "without-lfc"}]' }}
 secrets: inherit

 # Keep `benchmarks` job outside of `build-and-test-locally` workflow to make job failures non-blocking
@@ -516,7 +512,7 @@ jobs:
 })

 trigger-e2e-tests:
-if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'run-e2e-tests-in-draft') || github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute' }}
+if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'run-e2e-tests-in-draft') || github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' }}
 needs: [ check-permissions, promote-images, tag ]
 uses: ./.github/workflows/trigger-e2e-tests.yml
 secrets: inherit
@@ -672,7 +668,7 @@ jobs:
 neondatabase/compute-node-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-${{ matrix.arch }}

 - name: Build neon extensions test image
-if: matrix.version.pg >= 'v16'
+if: matrix.version.pg == 'v16'
 uses: docker/build-push-action@v6
 with:
 context: .
@@ -687,7 +683,8 @@ jobs:
 pull: true
 file: compute/compute-node.Dockerfile
 target: neon-pg-ext-test
-cache-from: type=registry,ref=cache.neon.build/compute-node-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
+cache-from: type=registry,ref=cache.neon.build/neon-test-extensions-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
+cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=cache.neon.build/neon-test-extensions-{0}:cache-{1}-{2},mode=max', matrix.version.pg, matrix.version.debian, matrix.arch) || '' }}
 tags: |
 neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{needs.tag.outputs.build-tag}}-${{ matrix.version.debian }}-${{ matrix.arch }}

@@ -710,7 +707,7 @@ jobs:
 push: true
 pull: true
 file: compute/compute-node.Dockerfile
-cache-from: type=registry,ref=cache.neon.build/compute-node-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
+cache-from: type=registry,ref=cache.neon.build/neon-test-extensions-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
 cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=cache.neon.build/compute-tools-{0}:cache-{1}-{2},mode=max', matrix.version.pg, matrix.version.debian, matrix.arch) || '' }}
 tags: |
 neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-${{ matrix.arch }}
@@ -746,7 +743,7 @@ jobs:
 neondatabase/compute-node-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64
|
||||||
|
|
||||||
- name: Create multi-arch neon-test-extensions image
|
- name: Create multi-arch neon-test-extensions image
|
||||||
if: matrix.version.pg >= 'v16'
|
if: matrix.version.pg == 'v16'
|
||||||
run: |
|
run: |
|
||||||
docker buildx imagetools create -t neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }} \
|
docker buildx imagetools create -t neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }} \
|
||||||
-t neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }} \
|
-t neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }} \
|
||||||
@@ -835,7 +832,6 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
arch: [ x64, arm64 ]
|
arch: [ x64, arm64 ]
|
||||||
pg_version: [v16, v17]
|
|
||||||
|
|
||||||
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', matrix.arch == 'arm64' && 'small-arm64' || 'small')) }}
|
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', matrix.arch == 'arm64' && 'small-arm64' || 'small')) }}
|
||||||
|
|
||||||
@@ -874,10 +870,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Verify docker-compose example and test extensions
|
- name: Verify docker-compose example and test extensions
|
||||||
timeout-minutes: 20
|
timeout-minutes: 20
|
||||||
env:
|
run: env TAG=${{needs.tag.outputs.build-tag}} ./docker-compose/docker_compose_test.sh
|
||||||
TAG: ${{needs.tag.outputs.build-tag}}
|
|
||||||
TEST_VERSION_ONLY: ${{ matrix.pg_version }}
|
|
||||||
run: ./docker-compose/docker_compose_test.sh
|
|
||||||
|
|
||||||
- name: Print logs and clean up
|
- name: Print logs and clean up
|
||||||
if: always()
|
if: always()
|
||||||
@@ -937,7 +930,7 @@ jobs:
|
|||||||
neondatabase/neon-test-extensions-v16:${{ needs.tag.outputs.build-tag }}
|
neondatabase/neon-test-extensions-v16:${{ needs.tag.outputs.build-tag }}
|
||||||
|
|
||||||
- name: Configure AWS-prod credentials
|
- name: Configure AWS-prod credentials
|
||||||
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
|
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy'
|
||||||
uses: aws-actions/configure-aws-credentials@v4
|
uses: aws-actions/configure-aws-credentials@v4
|
||||||
with:
|
with:
|
||||||
aws-region: eu-central-1
|
aws-region: eu-central-1
|
||||||
@@ -946,12 +939,12 @@ jobs:
|
|||||||
|
|
||||||
- name: Login to prod ECR
|
- name: Login to prod ECR
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
|
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy'
|
||||||
with:
|
with:
|
||||||
registry: 093970136003.dkr.ecr.eu-central-1.amazonaws.com
|
registry: 093970136003.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
|
|
||||||
- name: Copy all images to prod ECR
|
- name: Copy all images to prod ECR
|
||||||
if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
|
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy'
|
||||||
run: |
|
run: |
|
||||||
for image in neon compute-tools {vm-,}compute-node-{v14,v15,v16,v17}; do
|
for image in neon compute-tools {vm-,}compute-node-{v14,v15,v16,v17}; do
|
||||||
docker buildx imagetools create -t 093970136003.dkr.ecr.eu-central-1.amazonaws.com/${image}:${{ needs.tag.outputs.build-tag }} \
|
docker buildx imagetools create -t 093970136003.dkr.ecr.eu-central-1.amazonaws.com/${image}:${{ needs.tag.outputs.build-tag }} \
|
||||||
@@ -971,7 +964,7 @@ jobs:
|
|||||||
tenant_id: ${{ vars.AZURE_TENANT_ID }}
|
tenant_id: ${{ vars.AZURE_TENANT_ID }}
|
||||||
|
|
||||||
push-to-acr-prod:
|
push-to-acr-prod:
|
||||||
if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
|
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy'
|
||||||
needs: [ tag, promote-images ]
|
needs: [ tag, promote-images ]
|
||||||
uses: ./.github/workflows/_push-to-acr.yml
|
uses: ./.github/workflows/_push-to-acr.yml
|
||||||
with:
|
with:
|
||||||
@@ -1059,7 +1052,7 @@ jobs:
|
|||||||
deploy:
|
deploy:
|
||||||
needs: [ check-permissions, promote-images, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
|
needs: [ check-permissions, promote-images, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
|
||||||
# `!failure() && !cancelled()` is required because the workflow depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
|
# `!failure() && !cancelled()` is required because the workflow depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
|
||||||
if: (github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute') && !failure() && !cancelled()
|
if: (github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy') && !failure() && !cancelled()
|
||||||
|
|
||||||
runs-on: [ self-hosted, small ]
|
runs-on: [ self-hosted, small ]
|
||||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||||
@@ -1108,15 +1101,13 @@ jobs:
|
|||||||
-f deployProxyAuthBroker=true \
|
-f deployProxyAuthBroker=true \
|
||||||
-f branch=main \
|
-f branch=main \
|
||||||
-f dockerTag=${{needs.tag.outputs.build-tag}}
|
-f dockerTag=${{needs.tag.outputs.build-tag}}
|
||||||
elif [[ "$GITHUB_REF_NAME" == "release-compute" ]]; then
|
|
||||||
gh workflow --repo neondatabase/infra run deploy-compute-dev.yml --ref main -f dockerTag=${{needs.tag.outputs.build-tag}}
|
|
||||||
else
|
else
|
||||||
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main', 'release', 'release-proxy' or 'release-compute'"
|
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Create git tag
|
- name: Create git tag
|
||||||
if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
|
if: github.ref_name == 'release' || github.ref_name == 'release-proxy'
|
||||||
uses: actions/github-script@v7
|
uses: actions/github-script@v7
|
||||||
with:
|
with:
|
||||||
# Retry script for 5XX server errors: https://github.com/actions/github-script#retries
|
# Retry script for 5XX server errors: https://github.com/actions/github-script#retries
|
||||||
|
|||||||
.github/workflows/check-build-tools-image.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
+name: Check build-tools image
+
+on:
+  workflow_call:
+    outputs:
+      image-tag:
+        description: "build-tools image tag"
+        value: ${{ jobs.check-image.outputs.tag }}
+      found:
+        description: "Whether the image is found in the registry"
+        value: ${{ jobs.check-image.outputs.found }}
+
+defaults:
+  run:
+    shell: bash -euo pipefail {0}
+
+# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
+permissions: {}
+
+jobs:
+  check-image:
+    runs-on: ubuntu-22.04
+    outputs:
+      tag: ${{ steps.get-build-tools-tag.outputs.image-tag }}
+      found: ${{ steps.check-image.outputs.found }}
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get build-tools image tag for the current commit
+        id: get-build-tools-tag
+        env:
+          IMAGE_TAG: |
+            ${{ hashFiles('build-tools.Dockerfile',
+                          '.github/workflows/check-build-tools-image.yml',
+                          '.github/workflows/build-build-tools-image.yml') }}
+        run: |
+          echo "image-tag=${IMAGE_TAG}" | tee -a $GITHUB_OUTPUT
+
+      - name: Check if such tag found in the registry
+        id: check-image
+        env:
+          IMAGE_TAG: ${{ steps.get-build-tools-tag.outputs.image-tag }}
+        run: |
+          if docker manifest inspect neondatabase/build-tools:${IMAGE_TAG}; then
+            found=true
+          else
+            found=false
+          fi
+
+          echo "found=${found}" | tee -a $GITHUB_OUTPUT
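The existence test above hinges on `docker manifest inspect`, which fails with a non-zero exit code when the tag is absent, so no image ever needs to be pulled. For reference, a minimal local sketch of the same check, assuming a Docker CLI where `docker manifest inspect` is available and using a placeholder tag (the real tag is the hashFiles() value computed by the workflow):

  #!/usr/bin/env bash
  # Illustrative sketch only; IMAGE_TAG is a placeholder, not a value from the diff.
  IMAGE_TAG="<content-hash-from-hashFiles>"
  if docker manifest inspect "neondatabase/build-tools:${IMAGE_TAG}" >/dev/null 2>&1; then
    echo "found=true"    # tag already exists in the registry, the rebuild can be skipped
  else
    echo "found=false"   # tag is missing, build-build-tools-image must run
  fi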
.github/workflows/ingest_benchmark.yml (278 changed lines)
@@ -1,4 +1,4 @@
-name: benchmarking ingest
+name: Benchmarking

 on:
   # uncomment to run on push for debugging your PR
@@ -26,7 +26,6 @@ concurrency:
 jobs:
   ingest:
     strategy:
-      fail-fast: false # allow other variants to continue even if one fails
       matrix:
         target_project: [new_empty_project, large_existing_project]
     permissions:
@@ -75,16 +74,18 @@
           compute_units: '[7, 7]' # we want to test large compute here to avoid compute-side bottleneck
           api_key: ${{ secrets.NEON_STAGING_API_KEY }}

-      - name: Initialize Neon project
+      - name: Initialize Neon project and retrieve current backpressure seconds
         if: ${{ matrix.target_project == 'new_empty_project' }}
         env:
-          BENCHMARK_INGEST_TARGET_CONNSTR: ${{ steps.create-neon-project-ingest-target.outputs.dsn }}
+          NEW_PROJECT_CONNSTR: ${{ steps.create-neon-project-ingest-target.outputs.dsn }}
           NEW_PROJECT_ID: ${{ steps.create-neon-project-ingest-target.outputs.project_id }}
         run: |
           echo "Initializing Neon project with project_id: ${NEW_PROJECT_ID}"
           export LD_LIBRARY_PATH=${PG_16_LIB_PATH}
-          ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"
-          echo "BENCHMARK_INGEST_TARGET_CONNSTR=${BENCHMARK_INGEST_TARGET_CONNSTR}" >> $GITHUB_ENV
+          ${PSQL} "${NEW_PROJECT_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"
+          BACKPRESSURE_TIME_BEFORE_INGEST=$(${PSQL} "${NEW_PROJECT_CONNSTR}" -t -c "select backpressure_throttling_time()/1000000;")
+          echo "BACKPRESSURE_TIME_BEFORE_INGEST=${BACKPRESSURE_TIME_BEFORE_INGEST}" >> $GITHUB_ENV
+          echo "NEW_PROJECT_CONNSTR=${NEW_PROJECT_CONNSTR}" >> $GITHUB_ENV

       - name: Create Neon Branch for large tenant
         if: ${{ matrix.target_project == 'large_existing_project' }}
@@ -94,55 +95,266 @@
           project_id: ${{ vars.BENCHMARK_INGEST_TARGET_PROJECTID }}
           api_key: ${{ secrets.NEON_STAGING_API_KEY }}

-      - name: Initialize Neon project
+      - name: Initialize Neon project and retrieve current backpressure seconds
         if: ${{ matrix.target_project == 'large_existing_project' }}
         env:
-          BENCHMARK_INGEST_TARGET_CONNSTR: ${{ steps.create-neon-branch-ingest-target.outputs.dsn }}
+          NEW_PROJECT_CONNSTR: ${{ steps.create-neon-branch-ingest-target.outputs.dsn }}
           NEW_BRANCH_ID: ${{ steps.create-neon-branch-ingest-target.outputs.branch_id }}
         run: |
           echo "Initializing Neon branch with branch_id: ${NEW_BRANCH_ID}"
           export LD_LIBRARY_PATH=${PG_16_LIB_PATH}
           # Extract the part before the database name
-          base_connstr="${BENCHMARK_INGEST_TARGET_CONNSTR%/*}"
+          base_connstr="${NEW_PROJECT_CONNSTR%/*}"
           # Extract the query parameters (if any) after the database name
-          query_params="${BENCHMARK_INGEST_TARGET_CONNSTR#*\?}"
+          query_params="${NEW_PROJECT_CONNSTR#*\?}"
           # Reconstruct the new connection string
-          if [ "$query_params" != "$BENCHMARK_INGEST_TARGET_CONNSTR" ]; then
+          if [ "$query_params" != "$NEW_PROJECT_CONNSTR" ]; then
             new_connstr="${base_connstr}/neondb?${query_params}"
           else
             new_connstr="${base_connstr}/neondb"
           fi
           ${PSQL} "${new_connstr}" -c "drop database ludicrous;"
           ${PSQL} "${new_connstr}" -c "CREATE DATABASE ludicrous;"
-          if [ "$query_params" != "$BENCHMARK_INGEST_TARGET_CONNSTR" ]; then
-            BENCHMARK_INGEST_TARGET_CONNSTR="${base_connstr}/ludicrous?${query_params}"
+          if [ "$query_params" != "$NEW_PROJECT_CONNSTR" ]; then
+            NEW_PROJECT_CONNSTR="${base_connstr}/ludicrous?${query_params}"
           else
-            BENCHMARK_INGEST_TARGET_CONNSTR="${base_connstr}/ludicrous"
+            NEW_PROJECT_CONNSTR="${base_connstr}/ludicrous"
           fi
-          ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"
-          echo "BENCHMARK_INGEST_TARGET_CONNSTR=${BENCHMARK_INGEST_TARGET_CONNSTR}" >> $GITHUB_ENV
+          ${PSQL} "${NEW_PROJECT_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"
+          BACKPRESSURE_TIME_BEFORE_INGEST=$(${PSQL} "${NEW_PROJECT_CONNSTR}" -t -c "select backpressure_throttling_time()/1000000;")
+          echo "BACKPRESSURE_TIME_BEFORE_INGEST=${BACKPRESSURE_TIME_BEFORE_INGEST}" >> $GITHUB_ENV
+          echo "NEW_PROJECT_CONNSTR=${NEW_PROJECT_CONNSTR}" >> $GITHUB_ENV
+
+      - name: Create pgcopydb filter file
+        run: |
+          cat << EOF > /tmp/pgcopydb_filter.txt
+          [include-only-table]
+          public.events
+          public.emails
+          public.email_transmissions
+          public.payments
+          public.editions
+          public.edition_modules
+          public.sp_content
+          public.email_broadcasts
+          public.user_collections
+          public.devices
+          public.user_accounts
+          public.lessons
+          public.lesson_users
+          public.payment_methods
+          public.orders
+          public.course_emails
+          public.modules
+          public.users
+          public.module_users
+          public.courses
+          public.payment_gateway_keys
+          public.accounts
+          public.roles
+          public.payment_gateways
+          public.management
+          public.event_names
+          EOF

       - name: Invoke pgcopydb
-        uses: ./.github/actions/run-python-test-set
-        with:
-          build_type: remote
-          test_selection: performance/test_perf_ingest_using_pgcopydb.py
-          run_in_parallel: false
-          extra_params: -s -m remote_cluster --timeout 86400 -k test_ingest_performance_using_pgcopydb
-          pg_version: v16
-          save_perf_report: true
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_INGEST_SOURCE_CONNSTR: ${{ secrets.BENCHMARK_INGEST_SOURCE_CONNSTR }}
-          TARGET_PROJECT_TYPE: ${{ matrix.target_project }}
-          # we report PLATFORM in zenbenchmark NeonBenchmarker perf database and want to distinguish between new project and large tenant
-          PLATFORM: "${{ matrix.target_project }}-us-east-2-staging"
-          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
+        run: |
+          export LD_LIBRARY_PATH=${PGCOPYDB_LIB_PATH}:${PG_16_LIB_PATH}
+          export PGCOPYDB_SOURCE_PGURI="${BENCHMARK_INGEST_SOURCE_CONNSTR}"
+          export PGCOPYDB_TARGET_PGURI="${NEW_PROJECT_CONNSTR}"
+          export PGOPTIONS="-c maintenance_work_mem=8388608 -c max_parallel_maintenance_workers=7"
+          ${PG_CONFIG} --bindir
+          ${PGCOPYDB} --version
+          ${PGCOPYDB} clone --skip-vacuum --no-owner --no-acl --skip-db-properties --table-jobs 4 \
+          --index-jobs 4 --restore-jobs 4 --split-tables-larger-than 10GB --skip-extensions \
+          --use-copy-binary --filters /tmp/pgcopydb_filter.txt 2>&1 | tee /tmp/pgcopydb_${{ matrix.target_project }}.log

-      - name: show tables sizes after ingest
+      # create dummy pgcopydb log to test parsing
+      # - name: create dummy log for parser test
+      #   run: |
+      #     cat << EOF > /tmp/pgcopydb_${{ matrix.target_project }}.log
+      #     2024-11-04 18:00:53.433 500861 INFO main.c:136 Running pgcopydb version 0.17.10.g8361a93 from "/usr/lib/postgresql/17/bin/pgcopydb"
+      #     2024-11-04 18:00:53.434 500861 INFO cli_common.c:1225 [SOURCE] Copying database from "postgres://neondb_owner@ep-bitter-shape-w2c1ir0a.us-east-2.aws.neon.build/neondb?sslmode=require&keepalives=1&keepalives_idle=10&keepalives_interval=10&keepalives_count=60"
+      #     2024-11-04 18:00:53.434 500861 INFO cli_common.c:1226 [TARGET] Copying database into "postgres://neondb_owner@ep-icy-union-w25qd5pj.us-east-2.aws.neon.build/ludicrous?sslmode=require&keepalives=1&keepalives_idle=10&keepalives_interval=10&keepalives_count=60"
+      #     2024-11-04 18:00:53.442 500861 INFO copydb.c:105 Using work dir "/tmp/pgcopydb"
+      #     2024-11-04 18:00:53.541 500861 INFO snapshot.c:107 Exported snapshot "00000008-00000033-1" from the source database
+      #     2024-11-04 18:00:53.556 500865 INFO cli_clone_follow.c:543 STEP 1: fetch source database tables, indexes, and sequences
+      #     2024-11-04 18:00:54.570 500865 INFO copydb_schema.c:716 Splitting source candidate tables larger than 10 GB
+      #     2024-11-04 18:00:54.570 500865 INFO copydb_schema.c:829 Table public.events is 96 GB large which is larger than --split-tables-larger-than 10 GB, and does not have a unique column of type integer: splitting by CTID
+      #     2024-11-04 18:01:05.538 500865 INFO copydb_schema.c:905 Table public.events is 96 GB large, 10 COPY processes will be used, partitioning on ctid.
+      #     2024-11-04 18:01:05.564 500865 INFO copydb_schema.c:905 Table public.email_transmissions is 27 GB large, 4 COPY processes will be used, partitioning on id.
+      #     2024-11-04 18:01:05.584 500865 INFO copydb_schema.c:905 Table public.lessons is 25 GB large, 4 COPY processes will be used, partitioning on id.
+      #     2024-11-04 18:01:05.605 500865 INFO copydb_schema.c:905 Table public.lesson_users is 16 GB large, 3 COPY processes will be used, partitioning on id.
+      #     2024-11-04 18:01:05.605 500865 INFO copydb_schema.c:761 Fetched information for 26 tables (including 4 tables split in 21 partitions total), with an estimated total of 907 million tuples and 175 GB on-disk
+      #     2024-11-04 18:01:05.687 500865 INFO copydb_schema.c:968 Fetched information for 57 indexes (supporting 25 constraints)
+      #     2024-11-04 18:01:05.753 500865 INFO sequences.c:78 Fetching information for 24 sequences
+      #     2024-11-04 18:01:05.903 500865 INFO copydb_schema.c:1122 Fetched information for 4 extensions
+      #     2024-11-04 18:01:06.178 500865 INFO copydb_schema.c:1538 Found 0 indexes (supporting 0 constraints) in the target database
+      #     2024-11-04 18:01:06.184 500865 INFO cli_clone_follow.c:584 STEP 2: dump the source database schema (pre/post data)
+      #     2024-11-04 18:01:06.186 500865 INFO pgcmd.c:468 /usr/lib/postgresql/16/bin/pg_dump -Fc --snapshot 00000008-00000033-1 --section=pre-data --section=post-data --file /tmp/pgcopydb/schema/schema.dump 'postgres://neondb_owner@ep-bitter-shape-w2c1ir0a.us-east-2.aws.neon.build/neondb?sslmode=require&keepalives=1&keepalives_idle=10&keepalives_interval=10&keepalives_count=60'
+      #     2024-11-04 18:01:06.952 500865 INFO cli_clone_follow.c:592 STEP 3: restore the pre-data section to the target database
+      #     2024-11-04 18:01:07.004 500865 INFO pgcmd.c:1001 /usr/lib/postgresql/16/bin/pg_restore --dbname 'postgres://neondb_owner@ep-icy-union-w25qd5pj.us-east-2.aws.neon.build/ludicrous?sslmode=require&keepalives=1&keepalives_idle=10&keepalives_interval=10&keepalives_count=60' --section pre-data --jobs 4 --no-owner --no-acl --use-list /tmp/pgcopydb/schema/pre-filtered.list /tmp/pgcopydb/schema/schema.dump
+      #     2024-11-04 18:01:07.438 500874 INFO table-data.c:656 STEP 4: starting 4 table-data COPY processes
+      #     2024-11-04 18:01:07.451 500877 INFO vacuum.c:139 STEP 8: skipping VACUUM jobs per --skip-vacuum
+      #     2024-11-04 18:01:07.457 500875 INFO indexes.c:182 STEP 6: starting 4 CREATE INDEX processes
+      #     2024-11-04 18:01:07.457 500875 INFO indexes.c:183 STEP 7: constraints are built by the CREATE INDEX processes
+      #     2024-11-04 18:01:07.507 500865 INFO blobs.c:74 Skipping large objects: none found.
+      #     2024-11-04 18:01:07.509 500865 INFO sequences.c:194 STEP 9: reset sequences values
+      #     2024-11-04 18:01:07.510 500886 INFO sequences.c:290 Set sequences values on the target database
+      #     2024-11-04 20:49:00.587 500865 INFO cli_clone_follow.c:608 STEP 10: restore the post-data section to the target database
+      #     2024-11-04 20:49:00.600 500865 INFO pgcmd.c:1001 /usr/lib/postgresql/16/bin/pg_restore --dbname 'postgres://neondb_owner@ep-icy-union-w25qd5pj.us-east-2.aws.neon.build/ludicrous?sslmode=require&keepalives=1&keepalives_idle=10&keepalives_interval=10&keepalives_count=60' --section post-data --jobs 4 --no-owner --no-acl --use-list /tmp/pgcopydb/schema/post-filtered.list /tmp/pgcopydb/schema/schema.dump
+      #     2024-11-05 10:50:58.508 500865 INFO cli_clone_follow.c:639 All step are now done, 16h49m elapsed
+      #     2024-11-05 10:50:58.508 500865 INFO summary.c:3155 Printing summary for 26 tables and 57 indexes
+      #
+      #       OID | Schema |                 Name | Parts | copy duration | transmitted bytes | indexes | create index duration
+      #     ------+--------+----------------------+-------+---------------+-------------------+---------+----------------------
+      #     24654 | public |               events |    10 |         1d11h |            878 GB |       1 |                 1h41m
+      #     24623 | public |  email_transmissions |     4 |         4h46m |             99 GB |       3 |                 2h04m
+      #     24665 | public |              lessons |     4 |         4h42m |            161 GB |       4 |                 1m11s
+      #     24661 | public |         lesson_users |     3 |         2h46m |             49 GB |       3 |                39m35s
+      #     24631 | public |               emails |     1 |        34m07s |             10 GB |       2 |                   17s
+      #     24739 | public |             payments |     1 |         5m47s |           1848 MB |       4 |                 4m40s
+      #     24681 | public |         module_users |     1 |         4m57s |           1610 MB |       3 |                 1m50s
+      #     24694 | public |               orders |     1 |         2m50s |            835 MB |       3 |                 1m05s
+      #     24597 | public |              devices |     1 |         1m45s |            498 MB |       2 |                   40s
+      #     24723 | public |      payment_methods |     1 |         1m24s |            548 MB |       2 |                   31s
+      #     24765 | public |     user_collections |     1 |         2m17s |           1005 MB |       2 |                 968ms
+      #     24774 | public |                users |     1 |           52s |            291 MB |       4 |                   27s
+      #     24760 | public |        user_accounts |     1 |           16s |            172 MB |       3 |                   16s
+      #     24606 | public |      edition_modules |     1 |         8s983 |             46 MB |       3 |                 4s749
+      #     24583 | public |        course_emails |     1 |         8s526 |             26 MB |       2 |                 996ms
+      #     24685 | public |              modules |     1 |         1s592 |             21 MB |       3 |                 1s696
+      #     24610 | public |             editions |     1 |         2s199 |           7483 kB |       2 |                 1s032
+      #     24755 | public |           sp_content |     1 |         1s555 |           4177 kB |       0 |                   0ms
+      #     24619 | public |     email_broadcasts |     1 |         744ms |           2645 kB |       2 |                 677ms
+      #     24590 | public |              courses |     1 |         387ms |           1540 kB |       2 |                 367ms
+      #     24704 | public | payment_gateway_keys |     1 |         1s972 |            164 kB |       2 |                  27ms
+      #     24576 | public |             accounts |     1 |          58ms |             24 kB |       1 |                  14ms
+      #     24647 | public |          event_names |     1 |          32ms |             397 B |       1 |                   8ms
+      #     24716 | public |     payment_gateways |     1 |         1s675 |             117 B |       1 |                  11ms
+      #     24748 | public |                roles |     1 |          71ms |             173 B |       1 |                   8ms
+      #     24676 | public |           management |     1 |          33ms |              40 B |       1 |                  19ms
+      #
+      #
+      #     Step                                               Connection    Duration    Transfer   Concurrency
+      #     --------------------------------------------------   ----------  ----------  ----------  ------------
+      #     Catalog Queries (table ordering, filtering, etc)       source         12s                           1
+      #     Dump Schema                                             source       765ms                           1
+      #     Prepare Schema                                          target       466ms                           1
+      #     COPY, INDEX, CONSTRAINTS, VACUUM (wall clock)             both       2h47m                          12
+      #     COPY (cumulative)                                         both       7h46m     1225 GB               4
+      #     CREATE INDEX (cumulative)                               target       4h36m                           4
+      #     CONSTRAINTS (cumulative)                                target       8s493                           4
+      #     VACUUM (cumulative)                                     target         0ms                           4
+      #     Reset Sequences                                           both        60ms                           1
+      #     Large Objects (cumulative)                              (null)         0ms                           0
+      #     Finalize Schema                                           both      14h01m                           4
+      #     --------------------------------------------------   ----------  ----------  ----------  ------------
+      #     Total Wall Clock Duration                                 both      16h49m                          20
+      #
+      #
+      #     EOF
+
+      - name: show tables sizes and retrieve current backpressure seconds
         run: |
           export LD_LIBRARY_PATH=${PG_16_LIB_PATH}
-          ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "\dt+"
+          ${PSQL} "${NEW_PROJECT_CONNSTR}" -c "\dt+"
+          BACKPRESSURE_TIME_AFTER_INGEST=$(${PSQL} "${NEW_PROJECT_CONNSTR}" -t -c "select backpressure_throttling_time()/1000000;")
+          echo "BACKPRESSURE_TIME_AFTER_INGEST=${BACKPRESSURE_TIME_AFTER_INGEST}" >> $GITHUB_ENV
+
+      - name: Parse pgcopydb log and report performance metrics
+        env:
+          PERF_TEST_RESULT_CONNSTR: ${{ secrets.PERF_TEST_RESULT_CONNSTR }}
+        run: |
+          export LD_LIBRARY_PATH=${PG_16_LIB_PATH}
+
+          # Define the log file path
+          LOG_FILE="/tmp/pgcopydb_${{ matrix.target_project }}.log"
+
+          # Get the current git commit hash
+          git config --global --add safe.directory /__w/neon/neon
+          COMMIT_HASH=$(git rev-parse --short HEAD)
+
+          # Define the platform and test suite
+          PLATFORM="pg16-${{ matrix.target_project }}-us-east-2-staging"
+          SUIT="pgcopydb_ingest_bench"
+
+          # Function to convert time (e.g., "2h47m", "4h36m", "118ms", "8s493") to seconds
+          convert_to_seconds() {
+            local duration=$1
+            local total_seconds=0
+
+            # Check for hours (h)
+            if [[ "$duration" =~ ([0-9]+)h ]]; then
+              total_seconds=$((total_seconds + ${BASH_REMATCH[1]#0} * 3600))
+            fi
+
+            # Check for seconds (s)
+            if [[ "$duration" =~ ([0-9]+)s ]]; then
+              total_seconds=$((total_seconds + ${BASH_REMATCH[1]#0}))
+            fi
+
+            # Check for milliseconds (ms) (if applicable)
+            if [[ "$duration" =~ ([0-9]+)ms ]]; then
+              total_seconds=$((total_seconds + ${BASH_REMATCH[1]#0} / 1000))
+              duration=${duration/${BASH_REMATCH[0]}/} # need to remove it to avoid double counting with m
+            fi
+
+            # Check for minutes (m) - must be checked after ms because m is contained in ms
+            if [[ "$duration" =~ ([0-9]+)m ]]; then
+              total_seconds=$((total_seconds + ${BASH_REMATCH[1]#0} * 60))
+            fi
+
+            echo $total_seconds
+          }
+
+          # Calculate the backpressure difference in seconds
+          BACKPRESSURE_TIME_DIFF=$(awk "BEGIN {print $BACKPRESSURE_TIME_AFTER_INGEST - $BACKPRESSURE_TIME_BEFORE_INGEST}")
+
+          # Insert the backpressure time difference into the performance database
+          if [ -n "$BACKPRESSURE_TIME_DIFF" ]; then
+            PSQL_CMD="${PSQL} \"${PERF_TEST_RESULT_CONNSTR}\" -c \"
+            INSERT INTO public.perf_test_results (suit, revision, platform, metric_name, metric_value, metric_unit, metric_report_type, recorded_at_timestamp)
+            VALUES ('${SUIT}', '${COMMIT_HASH}', '${PLATFORM}', 'backpressure_time', ${BACKPRESSURE_TIME_DIFF}, 'seconds', 'lower_is_better', now());
+            \""
+            echo "Inserting backpressure time difference: ${BACKPRESSURE_TIME_DIFF} seconds"
+            eval $PSQL_CMD
+          fi
+
+          # Extract and process log lines
+          while IFS= read -r line; do
+            METRIC_NAME=""
+            # Match each desired line and extract the relevant information
+            if [[ "$line" =~ COPY,\ INDEX,\ CONSTRAINTS,\ VACUUM.* ]]; then
+              METRIC_NAME="COPY, INDEX, CONSTRAINTS, VACUUM (wall clock)"
+            elif [[ "$line" =~ COPY\ \(cumulative\).* ]]; then
+              METRIC_NAME="COPY (cumulative)"
+            elif [[ "$line" =~ CREATE\ INDEX\ \(cumulative\).* ]]; then
+              METRIC_NAME="CREATE INDEX (cumulative)"
+            elif [[ "$line" =~ CONSTRAINTS\ \(cumulative\).* ]]; then
+              METRIC_NAME="CONSTRAINTS (cumulative)"
+            elif [[ "$line" =~ Finalize\ Schema.* ]]; then
+              METRIC_NAME="Finalize Schema"
+            elif [[ "$line" =~ Total\ Wall\ Clock\ Duration.* ]]; then
+              METRIC_NAME="Total Wall Clock Duration"
+            fi
+
+            # If a metric was matched, insert it into the performance database
+            if [ -n "$METRIC_NAME" ]; then
+              DURATION=$(echo "$line" | grep -oP '\d+h\d+m|\d+s|\d+ms|\d{1,2}h\d{1,2}m|\d+\.\d+s' | head -n 1)
+              METRIC_VALUE=$(convert_to_seconds "$DURATION")
+              PSQL_CMD="${PSQL} \"${PERF_TEST_RESULT_CONNSTR}\" -c \"
+              INSERT INTO public.perf_test_results (suit, revision, platform, metric_name, metric_value, metric_unit, metric_report_type, recorded_at_timestamp)
+              VALUES ('${SUIT}', '${COMMIT_HASH}', '${PLATFORM}', '${METRIC_NAME}', ${METRIC_VALUE}, 'seconds', 'lower_is_better', now());
+              \""
+              echo "Inserting ${METRIC_NAME} with value ${METRIC_VALUE} seconds"
+              eval $PSQL_CMD
+            fi
+          done < "$LOG_FILE"

       - name: Delete Neon Project
         if: ${{ always() && matrix.target_project == 'new_empty_project' }}
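The added log-parsing step converts pgcopydb duration strings such as "2h47m", "8s493" or "118ms" into whole seconds before inserting them into the perf database. A standalone sketch of that helper with example inputs follows; the expected outputs in the comments are our own illustration and are not taken from the diff:

  #!/usr/bin/env bash
  # Mirrors the convert_to_seconds helper shown in the workflow above.
  convert_to_seconds() {
    local duration=$1
    local total_seconds=0
    if [[ "$duration" =~ ([0-9]+)h ]]; then
      total_seconds=$((total_seconds + ${BASH_REMATCH[1]#0} * 3600))
    fi
    if [[ "$duration" =~ ([0-9]+)s ]]; then
      total_seconds=$((total_seconds + ${BASH_REMATCH[1]#0}))
    fi
    if [[ "$duration" =~ ([0-9]+)ms ]]; then
      total_seconds=$((total_seconds + ${BASH_REMATCH[1]#0} / 1000))
      duration=${duration/${BASH_REMATCH[0]}/}   # strip "NNNms" so the "m" branch below is not double counted
    fi
    if [[ "$duration" =~ ([0-9]+)m ]]; then
      total_seconds=$((total_seconds + ${BASH_REMATCH[1]#0} * 60))
    fi
    echo $total_seconds
  }

  convert_to_seconds "2h47m"   # prints 10020 (2*3600 + 47*60)
  convert_to_seconds "8s493"   # prints 8 (the 493 has no recognized unit attached, so it is ignored)
  convert_to_seconds "118ms"   # prints 0 (integer division rounds sub-second values down)

Note that sub-second precision is intentionally discarded, which is acceptable here because the reported metrics are multi-minute wall-clock durations.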
.github/workflows/neon_extra_builds.yml (12 changed lines)
@@ -26,9 +26,15 @@ jobs:
     with:
       github-event-name: ${{ github.event_name}}

-  build-build-tools-image:
+  check-build-tools-image:
     needs: [ check-permissions ]
+    uses: ./.github/workflows/check-build-tools-image.yml
+
+  build-build-tools-image:
+    needs: [ check-build-tools-image ]
     uses: ./.github/workflows/build-build-tools-image.yml
+    with:
+      image-tag: ${{ needs.check-build-tools-image.outputs.image-tag }}
     secrets: inherit

   check-macos-build:
@@ -38,7 +44,7 @@ jobs:
       contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
       github.ref_name == 'main'
     timeout-minutes: 90
-    runs-on: macos-15
+    runs-on: macos-14

     env:
       # Use release build only, to have less debug info around
@@ -52,7 +58,7 @@ jobs:
           submodules: true

       - name: Install macOS postgres dependencies
-        run: brew install flex bison openssl protobuf icu4c
+        run: brew install flex bison openssl protobuf icu4c pkg-config

       - name: Set pg 14 revision for caching
         id: pg_v14_rev
.github/workflows/periodic_pagebench.yml (10 changed lines)
@@ -29,7 +29,7 @@ jobs:
   trigger_bench_on_ec2_machine_in_eu_central_1:
     runs-on: [ self-hosted, small ]
     container:
-      image: neondatabase/build-tools:pinned-bookworm
+      image: neondatabase/build-tools:pinned
       credentials:
         username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
         password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
@@ -72,7 +72,7 @@ jobs:
           echo "COMMIT_HASH=$INPUT_COMMIT_HASH" >> $GITHUB_ENV
         fi

     - name: Start Bench with run_id
       run: |
         curl -k -X 'POST' \
         "${EC2_MACHINE_URL_US}/start_test/${GITHUB_RUN_ID}" \
@@ -116,7 +116,7 @@ jobs:
         -H 'accept: application/gzip' \
         -H "Authorization: Bearer $API_KEY" \
         --output "test_log_${GITHUB_RUN_ID}.gz"

     - name: Unzip Test Log and Print it into this job's log
       if: always() && steps.poll_step.outputs.too_many_runs != 'true'
       run: |
@@ -134,13 +134,13 @@ jobs:
       if: ${{ github.event.schedule && failure() }}
       uses: slackapi/slack-github-action@v1
       with:
-        channel-id: "C06KHQVQ7U3" # on-call-qa-staging-stream
+        channel-id: "C033QLM5P7D" # dev-staging-stream
         slack-message: "Periodic pagebench testing on dedicated hardware: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
       env:
         SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

     - name: Cleanup Test Resources
       if: always()
       run: |
         curl -k -X 'POST' \
         "${EC2_MACHINE_URL_US}/cleanup_test/${GITHUB_RUN_ID}" \
.github/workflows/pg-clients.yml (8 changed lines)
@@ -39,9 +39,15 @@ jobs:
     with:
       github-event-name: ${{ github.event_name }}

-  build-build-tools-image:
+  check-build-tools-image:
     needs: [ check-permissions ]
+    uses: ./.github/workflows/check-build-tools-image.yml
+
+  build-build-tools-image:
+    needs: [ check-build-tools-image ]
     uses: ./.github/workflows/build-build-tools-image.yml
+    with:
+      image-tag: ${{ needs.check-build-tools-image.outputs.image-tag }}
     secrets: inherit

   test-logical-replication:
.github/workflows/pin-build-tools-image.yml (2 changed lines)
@@ -94,7 +94,7 @@ jobs:

       - name: Tag build-tools with `${{ env.TO_TAG }}` in Docker Hub, ECR, and ACR
         env:
-          DEFAULT_DEBIAN_VERSION: bookworm
+          DEFAULT_DEBIAN_VERSION: bullseye
         run: |
           for debian_version in bullseye bookworm; do
             tags=()
.github/workflows/pre-merge-checks.yml (15 changed lines)
@@ -23,8 +23,6 @@ jobs:
         id: python-src
         with:
           files: |
-            .github/workflows/_check-codestyle-python.yml
-            .github/workflows/build-build-tools-image.yml
             .github/workflows/pre-merge-checks.yml
             **/**.py
             poetry.lock
@@ -36,14 +34,16 @@ jobs:
         run: |
           echo "${PYTHON_CHANGED_FILES}"

-  build-build-tools-image:
+  check-build-tools-image:
     if: needs.get-changed-files.outputs.python-changed == 'true'
     needs: [ get-changed-files ]
+    uses: ./.github/workflows/check-build-tools-image.yml
+
+  build-build-tools-image:
+    needs: [ check-build-tools-image ]
     uses: ./.github/workflows/build-build-tools-image.yml
     with:
-      # Build only one combination to save time
-      archs: '["x64"]'
-      debians: '["bookworm"]'
+      image-tag: ${{ needs.check-build-tools-image.outputs.image-tag }}
     secrets: inherit

   check-codestyle-python:
@@ -51,8 +51,7 @@ jobs:
     needs: [ get-changed-files, build-build-tools-image ]
     uses: ./.github/workflows/_check-codestyle-python.yml
     with:
-      # `-bookworm-x64` suffix should match the combination in `build-build-tools-image`
-      build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm-x64
+      build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
     secrets: inherit

   # To get items from the merge queue merged into main we need to satisfy "Status checks that are required".
.github/workflows/release.yml (99 changed lines)
@@ -15,10 +15,6 @@ on:
         type: boolean
         description: 'Create Proxy release PR'
         required: false
-      create-compute-release-branch:
-        type: boolean
-        description: 'Create Compute release PR'
-        required: false

 # No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
 permissions: {}
@@ -29,40 +25,83 @@ defaults:

 jobs:
   create-storage-release-branch:
-    if: ${{ github.event.schedule == '0 6 * * MON' || inputs.create-storage-release-branch }}
+    if: ${{ github.event.schedule == '0 6 * * MON' || format('{0}', inputs.create-storage-release-branch) == 'true' }}
+    runs-on: ubuntu-22.04

     permissions:
-      contents: write
+      contents: write # for `git push`

-    uses: ./.github/workflows/_create-release-pr.yml
-    with:
-      component-name: 'Storage'
-      release-branch: 'release'
-    secrets:
-      ci-access-token: ${{ secrets.CI_ACCESS_TOKEN }}
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+        with:
+          ref: main
+
+      - name: Set environment variables
+        run: |
+          echo "RELEASE_DATE=$(date +'%Y-%m-%d')" | tee -a $GITHUB_ENV
+          echo "RELEASE_BRANCH=rc/$(date +'%Y-%m-%d')" | tee -a $GITHUB_ENV
+
+      - name: Create release branch
+        run: git checkout -b $RELEASE_BRANCH
+
+      - name: Push new branch
+        run: git push origin $RELEASE_BRANCH
+
+      - name: Create pull request into release
+        env:
+          GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
+        run: |
+          TITLE="Storage & Compute release ${RELEASE_DATE}"
+
+          cat << EOF > body.md
+          ## ${TITLE}
+
+          **Please merge this Pull Request using 'Create a merge commit' button**
+          EOF
+
+          gh pr create --title "${TITLE}" \
+                       --body-file "body.md" \
+                       --head "${RELEASE_BRANCH}" \
+                       --base "release"

   create-proxy-release-branch:
-    if: ${{ github.event.schedule == '0 6 * * THU' || inputs.create-proxy-release-branch }}
+    if: ${{ github.event.schedule == '0 6 * * THU' || format('{0}', inputs.create-proxy-release-branch) == 'true' }}
+    runs-on: ubuntu-22.04

     permissions:
-      contents: write
+      contents: write # for `git push`

-    uses: ./.github/workflows/_create-release-pr.yml
-    with:
-      component-name: 'Proxy'
-      release-branch: 'release-proxy'
-    secrets:
-      ci-access-token: ${{ secrets.CI_ACCESS_TOKEN }}
-
-  create-compute-release-branch:
-    if: inputs.create-compute-release-branch
-
-    permissions:
-      contents: write
-
-    uses: ./.github/workflows/_create-release-pr.yml
-    with:
-      component-name: 'Compute'
-      release-branch: 'release-compute'
-    secrets:
-      ci-access-token: ${{ secrets.CI_ACCESS_TOKEN }}
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+        with:
+          ref: main
+
+      - name: Set environment variables
+        run: |
+          echo "RELEASE_DATE=$(date +'%Y-%m-%d')" | tee -a $GITHUB_ENV
+          echo "RELEASE_BRANCH=rc/proxy/$(date +'%Y-%m-%d')" | tee -a $GITHUB_ENV
+
+      - name: Create release branch
+        run: git checkout -b $RELEASE_BRANCH
+
+      - name: Push new branch
+        run: git push origin $RELEASE_BRANCH
+
+      - name: Create pull request into release
+        env:
+          GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
+        run: |
+          TITLE="Proxy release ${RELEASE_DATE}"
+
+          cat << EOF > body.md
+          ## ${TITLE}
+
+          **Please merge this Pull Request using 'Create a merge commit' button**
+          EOF
+
+          gh pr create --title "${TITLE}" \
+                       --body-file "body.md" \
+                       --head "${RELEASE_BRANCH}" \
+                       --base "release-proxy"
@@ -4,12 +4,10 @@ on:
   schedule:
     - cron: '*/15 * * * *'
     - cron: '25 0 * * *'
-    - cron: '25 1 * * 6'

 jobs:
-  gh-workflow-stats-batch-2h:
-    name: GitHub Workflow Stats Batch 2 hours
-    if: github.event.schedule == '*/15 * * * *'
+  gh-workflow-stats-batch:
+    name: GitHub Workflow Stats Batch
     runs-on: ubuntu-22.04
     permissions:
       actions: read
@@ -18,36 +16,14 @@ jobs:
       uses: neondatabase/gh-workflow-stats-action@v0.2.1
       with:
         db_uri: ${{ secrets.GH_REPORT_STATS_DB_RW_CONNSTR }}
-        db_table: "gh_workflow_stats_neon"
+        db_table: "gh_workflow_stats_batch_neon"
         gh_token: ${{ secrets.GITHUB_TOKEN }}
         duration: '2h'
-
-  gh-workflow-stats-batch-48h:
-    name: GitHub Workflow Stats Batch 48 hours
-    if: github.event.schedule == '25 0 * * *'
-    runs-on: ubuntu-22.04
-    permissions:
-      actions: read
-    steps:
-      - name: Export Workflow Run for the past 48 hours
+    - name: Export Workflow Run for the past 24 hours
+      if: github.event.schedule == '25 0 * * *'
       uses: neondatabase/gh-workflow-stats-action@v0.2.1
       with:
         db_uri: ${{ secrets.GH_REPORT_STATS_DB_RW_CONNSTR }}
-        db_table: "gh_workflow_stats_neon"
+        db_table: "gh_workflow_stats_batch_neon"
         gh_token: ${{ secrets.GITHUB_TOKEN }}
-        duration: '48h'
-
-  gh-workflow-stats-batch-30d:
-    name: GitHub Workflow Stats Batch 30 days
-    if: github.event.schedule == '25 1 * * 6'
-    runs-on: ubuntu-22.04
-    permissions:
-      actions: read
-    steps:
-      - name: Export Workflow Run for the past 30 days
-      uses: neondatabase/gh-workflow-stats-action@v0.2.1
-      with:
-        db_uri: ${{ secrets.GH_REPORT_STATS_DB_RW_CONNSTR }}
-        db_table: "gh_workflow_stats_neon"
-        gh_token: ${{ secrets.GITHUB_TOKEN }}
-        duration: '720h'
+        duration: '24h'
.github/workflows/report-workflow-stats.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
+name: Report Workflow Stats
+
+on:
+  workflow_run:
+    workflows:
+      - Add `external` label to issues and PRs created by external users
+      - Benchmarking
+      - Build and Test
+      - Build and Test Locally
+      - Build build-tools image
+      - Check Permissions
+      - Check build-tools image
+      - Check neon with extra platform builds
+      - Cloud Regression Test
+      - Create Release Branch
+      - Handle `approved-for-ci-run` label
+      - Lint GitHub Workflows
+      - Notify Slack channel about upcoming release
+      - Periodic pagebench performance test on dedicated EC2 machine in eu-central-1 region
+      - Pin build-tools image
+      - Prepare benchmarking databases by restoring dumps
+      - Push images to ACR
+      - Test Postgres client libraries
+      - Trigger E2E Tests
+      - cleanup caches by a branch
+      - Pre-merge checks
+    types: [completed]
+
+jobs:
+  gh-workflow-stats:
+    name: Github Workflow Stats
+    runs-on: ubuntu-22.04
+    permissions:
+      actions: read
+    steps:
+      - name: Export GH Workflow Stats
+        uses: neondatabase/gh-workflow-stats-action@v0.1.4
+        with:
+          DB_URI: ${{ secrets.GH_REPORT_STATS_DB_RW_CONNSTR }}
+          DB_TABLE: "gh_workflow_stats_neon"
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GH_RUN_ID: ${{ github.event.workflow_run.id }}
.github/workflows/trigger-e2e-tests.yml (2 changed lines)
@@ -51,8 +51,6 @@ jobs:
           echo "tag=release-$(git rev-list --count HEAD)" | tee -a $GITHUB_OUTPUT
         elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
           echo "tag=release-proxy-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
-        elif [[ "$GITHUB_REF_NAME" == "release-compute" ]]; then
-          echo "tag=release-compute-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
         else
           echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
           BUILD_AND_TEST_RUN_ID=$(gh run list -b $CURRENT_BRANCH -c $CURRENT_SHA -w 'Build and Test' -L 1 --json databaseId --jq '.[].databaseId')
CODEOWNERS
@@ -1,8 +1,8 @@
-/.github/ @neondatabase/developer-productivity
 /compute_tools/ @neondatabase/control-plane @neondatabase/compute
+/storage_controller @neondatabase/storage
+/storage_scrubber @neondatabase/storage
 /libs/pageserver_api/ @neondatabase/storage
 /libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
-/libs/proxy/ @neondatabase/proxy
 /libs/remote_storage/ @neondatabase/storage
 /libs/safekeeper_api/ @neondatabase/storage
 /libs/vm_monitor/ @neondatabase/autoscaling
@@ -11,6 +11,4 @@
 /pgxn/neon/ @neondatabase/compute @neondatabase/storage
 /proxy/ @neondatabase/proxy
 /safekeeper/ @neondatabase/storage
-/storage_controller @neondatabase/storage
-/storage_scrubber @neondatabase/storage
 /vendor/ @neondatabase/compute
Cargo.lock (generated, 699 changed lines; diff suppressed because it is too large)
36
Cargo.toml
36
Cargo.toml
@@ -34,10 +34,6 @@ members = [
     "libs/vm_monitor",
     "libs/walproposer",
     "libs/wal_decoder",
-    "libs/postgres_initdb",
-    "libs/proxy/postgres-protocol2",
-    "libs/proxy/postgres-types2",
-    "libs/proxy/tokio-postgres2",
 ]
 
 [workspace.package]
@@ -61,7 +57,6 @@ async-trait = "0.1"
 aws-config = { version = "1.5", default-features = false, features=["rustls", "sso"] }
 aws-sdk-s3 = "1.52"
 aws-sdk-iam = "1.46.0"
-aws-sdk-kms = "1.47.0"
 aws-smithy-async = { version = "1.2.1", default-features = false, features=["rt-tokio"] }
 aws-smithy-types = "1.2"
 aws-credential-types = "1.2.0"
@@ -74,16 +69,15 @@ bindgen = "0.70"
 bit_field = "0.10.2"
 bstr = "1.0"
 byteorder = "1.4"
-bytes = "1.9"
+bytes = "1.0"
 camino = "1.1.6"
 cfg-if = "1.0.0"
 chrono = { version = "0.4", default-features = false, features = ["clock"] }
-clap = { version = "4.0", features = ["derive", "env"] }
+clap = { version = "4.0", features = ["derive"] }
 comfy-table = "7.1"
 const_format = "0.2"
 crc32c = "0.6"
 dashmap = { version = "5.5.0", features = ["raw-api"] }
-diatomic-waker = { version = "0.2.3" }
 either = "1.8"
 enum-map = "2.4.2"
 enumset = "1.0.12"
@@ -112,10 +106,9 @@ hyper-util = "0.1"
 tokio-tungstenite = "0.21.0"
 indexmap = "2"
 indoc = "2"
-ipnet = "2.10.0"
+ipnet = "2.9.0"
 itertools = "0.10"
 itoa = "1.0.11"
-jemalloc_pprof = "0.6"
 jsonwebtoken = "9"
 lasso = "0.7"
 libc = "0.2"
@@ -128,16 +121,15 @@ notify = "6.0.0"
 num_cpus = "1.15"
 num-traits = "0.2.15"
 once_cell = "1.13"
-opentelemetry = "0.26"
+opentelemetry = "0.24"
-opentelemetry_sdk = "0.26"
+opentelemetry_sdk = "0.24"
-opentelemetry-otlp = { version = "0.26", default-features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
+opentelemetry-otlp = { version = "0.17", default-features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
-opentelemetry-semantic-conventions = "0.26"
+opentelemetry-semantic-conventions = "0.16"
 parking_lot = "0.12"
 parquet = { version = "53", default-features = false, features = ["zstd"] }
 parquet_derive = "53"
 pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
 pin-project-lite = "0.2"
-pprof = { version = "0.14", features = ["criterion", "flamegraph", "protobuf", "protobuf-codec"] }
 procfs = "0.16"
 prometheus = {version = "0.13", default-features=false, features = ["process"]} # removes protobuf dependency
 prost = "0.13"
@@ -145,9 +137,9 @@ rand = "0.8"
 redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] }
 regex = "1.10.2"
 reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] }
-reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_26"] }
+reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_24"] }
-reqwest-middleware = "0.4"
+reqwest-middleware = "0.3.0"
-reqwest-retry = "0.7"
+reqwest-retry = "0.5"
 routerify = "3"
 rpds = "0.13"
 rustc-hash = "1.1.0"
@@ -161,7 +153,7 @@ sentry = { version = "0.32", default-features = false, features = ["backtrace",
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1"
 serde_path_to_error = "0.1"
-serde_with = { version = "2.0", features = [ "base64" ] }
+serde_with = "2.0"
 serde_assert = "0.5.0"
 sha2 = "0.10.2"
 signal-hook = "0.3"
@@ -176,7 +168,7 @@ sync_wrapper = "0.1.2"
 tar = "0.4"
 test-context = "0.3"
 thiserror = "1.0"
-tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms"] }
+tikv-jemallocator = { version = "0.6", features = ["stats"] }
 tikv-jemalloc-ctl = { version = "0.6", features = ["stats"] }
 tokio = { version = "1.17", features = ["macros"] }
 tokio-epoll-uring = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" }
@@ -192,7 +184,7 @@ tonic = {version = "0.12.3", features = ["tls", "tls-roots"]}
 tower-service = "0.3.2"
 tracing = "0.1"
 tracing-error = "0.2"
-tracing-opentelemetry = "0.27"
+tracing-opentelemetry = "0.25"
 tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
 try-lock = "0.2.5"
 twox-hash = { version = "1.6.3", default-features = false }
@@ -220,14 +212,12 @@ tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", br
 compute_api = { version = "0.1", path = "./libs/compute_api/" }
 consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
 metrics = { version = "0.1", path = "./libs/metrics/" }
-pageserver = { path = "./pageserver" }
 pageserver_api = { version = "0.1", path = "./libs/pageserver_api/" }
 pageserver_client = { path = "./pageserver/client" }
 pageserver_compaction = { version = "0.1", path = "./pageserver/compaction/" }
 postgres_backend = { version = "0.1", path = "./libs/postgres_backend/" }
 postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" }
 postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" }
-postgres_initdb = { path = "./libs/postgres_initdb" }
 pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
 remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
 safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
@@ -7,7 +7,7 @@ ARG IMAGE=build-tools
 ARG TAG=pinned
 ARG DEFAULT_PG_VERSION=17
 ARG STABLE_PG_VERSION=16
-ARG DEBIAN_VERSION=bookworm
+ARG DEBIAN_VERSION=bullseye
 ARG DEBIAN_FLAVOR=${DEBIAN_VERSION}-slim
 
 # Build Postgres
Makefile: 3 changed lines
@@ -38,7 +38,6 @@ ifeq ($(UNAME_S),Linux)
 	# Seccomp BPF is only available for Linux
 	PG_CONFIGURE_OPTS += --with-libseccomp
 else ifeq ($(UNAME_S),Darwin)
-	PG_CFLAGS += -DUSE_PREFETCH
 	ifndef DISABLE_HOMEBREW
 	# macOS with brew-installed openssl requires explicit paths
 	# It can be configured with OPENSSL_PREFIX variable
@@ -147,8 +146,6 @@ postgres-%: postgres-configure-% \
 	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_prewarm install
 	+@echo "Compiling pg_buffercache $*"
 	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache install
-	+@echo "Compiling pg_visibility $*"
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_visibility install
 	+@echo "Compiling pageinspect $*"
 	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect install
 	+@echo "Compiling amcheck $*"
@@ -132,7 +132,7 @@ make -j`sysctl -n hw.logicalcpu` -s
 To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively.
 
 To run the integration tests or Python scripts (not required to use the code), install
-Python (3.11 or higher), and install the python3 packages using `./scripts/pysync` (requires [poetry>=1.8](https://python-poetry.org/)) in the project directory.
+Python (3.9 or higher), and install the python3 packages using `./scripts/pysync` (requires [poetry>=1.8](https://python-poetry.org/)) in the project directory.
 
 
 #### Running neon database
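A minimal sketch of the documented test-dependency setup; note that the Python floor differs between the two sides of this hunk (3.11 vs 3.9), and the pip-based poetry install shown here is an assumption rather than part of the README:

python3 --version                              # must meet the minimum version stated above
python3 -m pip install --user "poetry>=1.8"    # assumption: any poetry >= 1.8 install method works
./scripts/pysync                               # installs the python3 packages for the integration tests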
@@ -1,4 +1,4 @@
-ARG DEBIAN_VERSION=bookworm
+ARG DEBIAN_VERSION=bullseye
 
 FROM debian:bookworm-slim AS pgcopydb_builder
 ARG DEBIAN_VERSION
@@ -57,9 +57,9 @@ RUN mkdir -p /pgcopydb/bin && \
     mkdir -p /pgcopydb/lib && \
     chmod -R 755 /pgcopydb && \
     chown -R nonroot:nonroot /pgcopydb
 
 COPY --from=pgcopydb_builder /usr/lib/postgresql/16/bin/pgcopydb /pgcopydb/bin/pgcopydb
 COPY --from=pgcopydb_builder /pgcopydb/lib/libpq.so.5 /pgcopydb/lib/libpq.so.5
 
 # System deps
 #
@@ -234,7 +234,7 @@ USER nonroot:nonroot
 WORKDIR /home/nonroot
 
 # Python
-ENV PYTHON_VERSION=3.11.10 \
+ENV PYTHON_VERSION=3.9.19 \
     PYENV_ROOT=/home/nonroot/.pyenv \
     PATH=/home/nonroot/.pyenv/shims:/home/nonroot/.pyenv/bin:/home/nonroot/.poetry/bin:$PATH
 RUN set -e \
@@ -258,14 +258,14 @@ WORKDIR /home/nonroot
 
 # Rust
 # Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`)
-ENV RUSTC_VERSION=1.83.0
+ENV RUSTC_VERSION=1.82.0
 ENV RUSTUP_HOME="/home/nonroot/.rustup"
 ENV PATH="/home/nonroot/.cargo/bin:${PATH}"
 ARG RUSTFILT_VERSION=0.2.1
-ARG CARGO_HAKARI_VERSION=0.9.33
+ARG CARGO_HAKARI_VERSION=0.9.30
-ARG CARGO_DENY_VERSION=0.16.2
+ARG CARGO_DENY_VERSION=0.16.1
-ARG CARGO_HACK_VERSION=0.6.33
+ARG CARGO_HACK_VERSION=0.6.31
-ARG CARGO_NEXTEST_VERSION=0.9.85
+ARG CARGO_NEXTEST_VERSION=0.9.72
 RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && whoami && \
     chmod +x rustup-init && \
     ./rustup-init -y --default-toolchain ${RUSTC_VERSION} && \
@@ -289,7 +289,7 @@ RUN whoami \
     && cargo --version --verbose \
     && rustup --version --verbose \
     && rustc --version --verbose \
     && clang --version
 
 RUN if [ "${DEBIAN_VERSION}" = "bookworm" ]; then \
     LD_LIBRARY_PATH=/pgcopydb/lib /pgcopydb/bin/pgcopydb --version; \
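The comment kept above asks to hold the apt-installed llvm in step with the LLVM bundled in the Rust toolchain. A quick way to compare the two inside this image (a sketch; it only assumes rustc and clang are on PATH, which this Dockerfile arranges):

rustc --version --verbose | grep LLVM   # LLVM shipped with RUSTC_VERSION
clang --version | head -n1              # LLVM/clang installed from apt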
@@ -3,7 +3,7 @@ ARG REPOSITORY=neondatabase
 ARG IMAGE=build-tools
 ARG TAG=pinned
 ARG BUILD_TAG
-ARG DEBIAN_VERSION=bookworm
+ARG DEBIAN_VERSION=bullseye
 ARG DEBIAN_FLAVOR=${DEBIAN_VERSION}-slim
 
 #########################################################################################
@@ -14,9 +14,6 @@ ARG DEBIAN_FLAVOR=${DEBIAN_VERSION}-slim
 FROM debian:$DEBIAN_FLAVOR AS build-deps
 ARG DEBIAN_VERSION
 
-# Use strict mode for bash to catch errors early
-SHELL ["/bin/bash", "-euo", "pipefail", "-c"]
-
 RUN case $DEBIAN_VERSION in \
     # Version-specific installs for Bullseye (PG14-PG16):
     # The h3_pg extension needs a cmake 3.20+, but Debian bullseye has 3.18.
@@ -109,7 +106,6 @@ RUN cd postgres && \
 #
 #########################################################################################
 FROM build-deps AS postgis-build
-ARG DEBIAN_VERSION
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 RUN apt update && \
@@ -126,12 +122,12 @@ RUN apt update && \
 # and also we must check backward compatibility with older versions of PostGIS.
 #
 # Use new version only for v17
-RUN case "${DEBIAN_VERSION}" in \
-    "bookworm") \
+RUN case "${PG_VERSION}" in \
+    "v17") \
         export SFCGAL_VERSION=1.4.1 \
         export SFCGAL_CHECKSUM=1800c8a26241588f11cddcf433049e9b9aea902e923414d2ecef33a3295626c3 \
     ;; \
-    "bullseye") \
+    "v14" | "v15" | "v16") \
         export SFCGAL_VERSION=1.3.10 \
         export SFCGAL_CHECKSUM=4e39b3b2adada6254a7bdba6d297bb28e1a9835a9f879b74f37e2dab70203232 \
     ;; \
@@ -232,8 +228,6 @@ FROM build-deps AS plv8-build
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 
-COPY compute/patches/plv8-3.1.10.patch /plv8-3.1.10.patch
-
 RUN apt update && \
     apt install --no-install-recommends -y ninja-build python3-dev libncurses5 binutils clang
 
@@ -245,6 +239,8 @@ RUN apt update && \
 #
 # Use new version only for v17
 # because since v3.2, plv8 doesn't include plcoffee and plls extensions
+ENV PLV8_TAG=v3.2.3
+
 RUN case "${PG_VERSION}" in \
     "v17") \
         export PLV8_TAG=v3.2.3 \
@@ -259,9 +255,8 @@ RUN case "${PG_VERSION}" in \
     git clone --recurse-submodules --depth 1 --branch ${PLV8_TAG} https://github.com/plv8/plv8.git plv8-src && \
     tar -czf plv8.tar.gz --exclude .git plv8-src && \
     cd plv8-src && \
-    if [[ "${PG_VERSION}" < "v17" ]]; then patch -p1 < /plv8-3.1.10.patch; fi && \
     # generate and copy upgrade scripts
-    mkdir -p upgrade && ./generate_upgrade.sh ${PLV8_TAG#v} && \
+    mkdir -p upgrade && ./generate_upgrade.sh 3.1.10 && \
     cp upgrade/* /usr/local/pgsql/share/extension/ && \
     export PATH="/usr/local/pgsql/bin:$PATH" && \
     make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
@@ -358,10 +353,10 @@ COPY compute/patches/pgvector.patch /pgvector.patch
 # because we build the images on different machines than where we run them.
 # Pass OPTFLAGS="" to remove it.
 #
-# vector >0.7.4 supports v17
-# last release v0.8.0 - Oct 30, 2024
-RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz -O pgvector.tar.gz && \
-    echo "867a2c328d4928a5a9d6f052cd3bc78c7d60228a9b914ad32aa3db88e9de27b0 pgvector.tar.gz" | sha256sum --check && \
+# vector 0.7.4 supports v17
+# last release v0.7.4 - Aug 5, 2024
+RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.4.tar.gz -O pgvector.tar.gz && \
+    echo "0341edf89b1924ae0d552f617e14fb7f8867c0194ed775bcc44fa40288642583 pgvector.tar.gz" | sha256sum --check && \
     mkdir pgvector-src && cd pgvector-src && tar xzf ../pgvector.tar.gz --strip-components=1 -C . && \
     patch -p1 < /pgvector.patch && \
     make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
@@ -1248,7 +1243,7 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) \
 
 #########################################################################################
 #
-# Compile and run the Neon-specific `compute_ctl` and `fast_import` binaries
+# Compile and run the Neon-specific `compute_ctl` binary
 #
 #########################################################################################
 FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
@@ -1269,7 +1264,6 @@ RUN cd compute_tools && mold -run cargo build --locked --profile release-line-de
 FROM debian:$DEBIAN_FLAVOR AS compute-tools-image
 
 COPY --from=compute-tools /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
-COPY --from=compute-tools /home/nonroot/target/release-line-debug-size-lto/fast_import /usr/local/bin/fast_import
 
 #########################################################################################
 #
@@ -1367,12 +1361,15 @@ RUN make PG_VERSION="${PG_VERSION}" -C compute
 
 FROM neon-pg-ext-build AS neon-pg-ext-test
 ARG PG_VERSION
-RUN mkdir /ext-src
+RUN case "${PG_VERSION}" in "v17") \
+    echo "v17 extensions are not supported yet. Quit" && exit 0;; \
+    esac && \
+    mkdir /ext-src
 
 #COPY --from=postgis-build /postgis.tar.gz /ext-src/
 #COPY --from=postgis-build /sfcgal/* /usr
 COPY --from=plv8-build /plv8.tar.gz /ext-src/
-#COPY --from=h3-pg-build /h3-pg.tar.gz /ext-src/
+COPY --from=h3-pg-build /h3-pg.tar.gz /ext-src/
 COPY --from=unit-pg-build /postgresql-unit.tar.gz /ext-src/
 COPY --from=vector-pg-build /pgvector.tar.gz /ext-src/
 COPY --from=vector-pg-build /pgvector.patch /ext-src/
@@ -1392,7 +1389,7 @@ COPY --from=hll-pg-build /hll.tar.gz /ext-src
 COPY --from=plpgsql-check-pg-build /plpgsql_check.tar.gz /ext-src
 #COPY --from=timescaledb-pg-build /timescaledb.tar.gz /ext-src
 COPY --from=pg-hint-plan-pg-build /pg_hint_plan.tar.gz /ext-src
-COPY compute/patches/pg_hint_plan_${PG_VERSION}.patch /ext-src
+COPY compute/patches/pg_hint_plan.patch /ext-src
 COPY --from=pg-cron-pg-build /pg_cron.tar.gz /ext-src
 COPY compute/patches/pg_cron.patch /ext-src
 #COPY --from=pg-pgx-ulid-build /home/nonroot/pgx_ulid.tar.gz /ext-src
@@ -1402,23 +1399,38 @@ COPY --from=pg-roaringbitmap-pg-build /pg_roaringbitmap.tar.gz /ext-src
 COPY --from=pg-semver-pg-build /pg_semver.tar.gz /ext-src
 #COPY --from=pg-embedding-pg-build /home/nonroot/pg_embedding-src/ /ext-src
 #COPY --from=wal2json-pg-build /wal2json_2_5.tar.gz /ext-src
-#pg_anon is not supported yet for pg v17 so, don't fail if nothing found
-COPY --from=pg-anon-pg-build /pg_anon.tar.g? /ext-src
+COPY --from=pg-anon-pg-build /pg_anon.tar.gz /ext-src
 COPY compute/patches/pg_anon.patch /ext-src
 COPY --from=pg-ivm-build /pg_ivm.tar.gz /ext-src
 COPY --from=pg-partman-build /pg_partman.tar.gz /ext-src
-RUN cd /ext-src/ && for f in *.tar.gz; \
+RUN case "${PG_VERSION}" in "v17") \
+    echo "v17 extensions are not supported yet. Quit" && exit 0;; \
+    esac && \
+    cd /ext-src/ && for f in *.tar.gz; \
     do echo $f; dname=$(echo $f | sed 's/\.tar.*//')-src; \
     rm -rf $dname; mkdir $dname; tar xzf $f --strip-components=1 -C $dname \
     || exit 1; rm -f $f; done
-RUN cd /ext-src/rum-src && patch -p1 <../rum.patch
+RUN case "${PG_VERSION}" in "v17") \
+    echo "v17 extensions are not supported yet. Quit" && exit 0;; \
+    esac && \
+    cd /ext-src/rum-src && patch -p1 <../rum.patch
-RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch
+RUN case "${PG_VERSION}" in "v17") \
+    echo "v17 extensions are not supported yet. Quit" && exit 0;; \
+    esac && \
+    cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch
-RUN cd /ext-src/pg_hint_plan-src && patch -p1 < /ext-src/pg_hint_plan_${PG_VERSION}.patch
+RUN case "${PG_VERSION}" in "v17") \
+    echo "v17 extensions are not supported yet. Quit" && exit 0;; \
+    esac && \
+    cd /ext-src/pg_hint_plan-src && patch -p1 < /ext-src/pg_hint_plan.patch
 COPY --chmod=755 docker-compose/run-tests.sh /run-tests.sh
 RUN case "${PG_VERSION}" in "v17") \
-    echo "postgresql_anonymizer does not yet support PG17" && exit 0;; \
-    esac && patch -p1 </ext-src/pg_anon.patch
-RUN patch -p1 </ext-src/pg_cron.patch
+    echo "v17 extensions are not supported yet. Quit" && exit 0;; \
+    esac && \
+    patch -p1 </ext-src/pg_anon.patch
+RUN case "${PG_VERSION}" in "v17") \
+    echo "v17 extensions are not supported yet. Quit" && exit 0;; \
+    esac && \
+    patch -p1 </ext-src/pg_cron.patch
 ENV PATH=/usr/local/pgsql/bin:$PATH
 ENV PGHOST=compute
 ENV PGPORT=55433
@@ -1446,7 +1458,6 @@ RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
 
 COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
 COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
-COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/fast_import /usr/local/bin/fast_import
 
 # pgbouncer and its config
 COPY --from=pgbouncer /usr/local/pgbouncer/bin/pgbouncer /usr/local/bin/pgbouncer
@@ -1522,25 +1533,6 @@ RUN apt update && \
     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
     localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
 
-# s5cmd 2.2.2 from https://github.com/peak/s5cmd/releases/tag/v2.2.2
-# used by fast_import
-ARG TARGETARCH
-ADD https://github.com/peak/s5cmd/releases/download/v2.2.2/s5cmd_2.2.2_linux_$TARGETARCH.deb /tmp/s5cmd.deb
-RUN set -ex; \
-    \
-    # Determine the expected checksum based on TARGETARCH
-    if [ "${TARGETARCH}" = "amd64" ]; then \
-        CHECKSUM="392c385320cd5ffa435759a95af77c215553d967e4b1c0fffe52e4f14c29cf85"; \
-    elif [ "${TARGETARCH}" = "arm64" ]; then \
-        CHECKSUM="939bee3cf4b5604ddb00e67f8c157b91d7c7a5b553d1fbb6890fad32894b7b46"; \
-    else \
-        echo "Unsupported architecture: ${TARGETARCH}"; exit 1; \
-    fi; \
-    \
-    # Compute and validate the checksum
-    echo "${CHECKSUM} /tmp/s5cmd.deb" | sha256sum -c -
-RUN dpkg -i /tmp/s5cmd.deb && rm /tmp/s5cmd.deb
-
 ENV LANG=en_US.utf8
 USER postgres
 ENTRYPOINT ["/usr/local/bin/compute_ctl"]
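The block removed above pins s5cmd 2.2.2 and refuses to install it unless the per-architecture SHA-256 matches. A standalone sketch of the same download-verify-install pattern (outside a Docker build, so TARGETARCH is set by hand; that value and the temp path are assumptions):

TARGETARCH=amd64   # docker buildx normally injects this build argument
wget -q -O /tmp/s5cmd.deb "https://github.com/peak/s5cmd/releases/download/v2.2.2/s5cmd_2.2.2_linux_${TARGETARCH}.deb"
case "$TARGETARCH" in
  amd64) CHECKSUM="392c385320cd5ffa435759a95af77c215553d967e4b1c0fffe52e4f14c29cf85" ;;
  arm64) CHECKSUM="939bee3cf4b5604ddb00e67f8c157b91d7c7a5b553d1fbb6890fad32894b7b46" ;;
  *) echo "Unsupported architecture: ${TARGETARCH}"; exit 1 ;;
esac
echo "${CHECKSUM} /tmp/s5cmd.deb" | sha256sum -c -   # aborts on mismatch
sudo dpkg -i /tmp/s5cmd.deb && rm /tmp/s5cmd.deb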
@@ -6,7 +6,6 @@
   import 'sql_exporter/compute_backpressure_throttling_seconds.libsonnet',
   import 'sql_exporter/compute_current_lsn.libsonnet',
   import 'sql_exporter/compute_logical_snapshot_files.libsonnet',
-  import 'sql_exporter/compute_logical_snapshots_bytes.libsonnet',
   import 'sql_exporter/compute_max_connections.libsonnet',
   import 'sql_exporter/compute_receive_lsn.libsonnet',
   import 'sql_exporter/compute_subscriptions_count.libsonnet',
@@ -1,9 +1,5 @@
 [databases]
-;; pgbouncer propagates application_name (if it's specified) to the server, but some
-;; clients don't set it. We set default application_name=pgbouncer to make it
-;; easier to identify pgbouncer connections in Postgres. If client sets
-;; application_name, it will be used instead.
-*=host=localhost port=5432 auth_user=cloud_admin application_name=pgbouncer
+*=host=localhost port=5432 auth_user=cloud_admin
 [pgbouncer]
 listen_port=6432
 listen_addr=0.0.0.0
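The comments removed from this config explain why a default application_name=pgbouncer was injected: it makes pooled connections identifiable from the Postgres side. One way to see the effect through the pooler (a sketch; the host, the database name, and using the listen_port above are assumptions about the local setup):

psql "host=localhost port=6432 dbname=postgres" \
  -c "SELECT application_name, count(*) FROM pg_stat_activity GROUP BY 1;"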
@@ -1,7 +0,0 @@
-SELECT
-  (SELECT current_setting('neon.timeline_id')) AS timeline_id,
-  -- Postgres creates temporary snapshot files of the form %X-%X.snap.%d.tmp.
-  -- These temporary snapshot files are renamed to the actual snapshot files
-  -- after they are completely built. We only WAL-log the completely built
-  -- snapshot files
-  (SELECT COALESCE(sum(size), 0) FROM pg_ls_logicalsnapdir() WHERE name LIKE '%.snap') AS logical_snapshots_bytes;
@@ -1,17 +0,0 @@
-local neon = import 'neon.libsonnet';
-
-local pg_ls_logicalsnapdir = importstr 'sql_exporter/compute_logical_snapshots_bytes.15.sql';
-local pg_ls_dir = importstr 'sql_exporter/compute_logical_snapshots_bytes.sql';
-
-{
-  metric_name: 'compute_logical_snapshots_bytes',
-  type: 'gauge',
-  help: 'Size of the pg_logical/snapshots directory, not including temporary files',
-  key_labels: [
-    'timeline_id',
-  ],
-  values: [
-    'logical_snapshots_bytes',
-  ],
-  query: if neon.PG_MAJORVERSION_NUM < 15 then pg_ls_dir else pg_ls_logicalsnapdir,
-}
@@ -1,9 +0,0 @@
-SELECT
-  (SELECT setting FROM pg_settings WHERE name = 'neon.timeline_id') AS timeline_id,
-  -- Postgres creates temporary snapshot files of the form %X-%X.snap.%d.tmp.
-  -- These temporary snapshot files are renamed to the actual snapshot files
-  -- after they are completely built. We only WAL-log the completely built
-  -- snapshot files
-  (SELECT COALESCE(sum((pg_stat_file('pg_logical/snapshots/' || name, missing_ok => true)).size), 0)
-   FROM (SELECT * FROM pg_ls_dir('pg_logical/snapshots') WHERE pg_ls_dir LIKE '%.snap') AS name
-  ) AS logical_snapshots_bytes;
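The three files removed above implement one exporter gauge in two flavors: pg_ls_logicalsnapdir() on Postgres 15+, and a pg_ls_dir()/pg_stat_file() fallback for older versions, selected by the jsonnet query switch. To eyeball the same number by hand on a v15+ compute (a sketch; connection details are assumptions):

psql -X -c "SELECT COALESCE(sum(size), 0) AS logical_snapshots_bytes
            FROM pg_ls_logicalsnapdir()
            WHERE name LIKE '%.snap';"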
@@ -147,7 +147,7 @@ index 542c2e098c..0062d3024f 100644
  ALTER TABLE ptnowner1 OWNER TO regress_ptnowner;
  ALTER TABLE ptnowner OWNER TO regress_ptnowner;
 diff --git a/src/test/regress/expected/collate.icu.utf8.out b/src/test/regress/expected/collate.icu.utf8.out
-index 3f9a8f539c..0a51b52940 100644
+index 97bbe53b64..eac3d42a79 100644
 --- a/src/test/regress/expected/collate.icu.utf8.out
 +++ b/src/test/regress/expected/collate.icu.utf8.out
 @@ -1016,7 +1016,7 @@ select * from collate_test1 where b ilike 'ABC';
@@ -309,7 +309,7 @@ index b48365ec98..a6ef910055 100644
  -- the wrong partition. This test is *not* guaranteed to trigger that bug, but
  -- does so when shared_buffers is small enough. To test if we encountered the
 diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out
-index 9a74820ee8..22400a5551 100644
+index faf1a4d1b0..a44c97db52 100644
 --- a/src/test/regress/expected/copy2.out
 +++ b/src/test/regress/expected/copy2.out
 @@ -553,8 +553,8 @@ select * from check_con_tbl;
@@ -573,7 +573,7 @@ index 93302a07ef..1a73f083ac 100644
  -- that does not match with what's expected.
  -- This checks all the object types that include schema qualifications.
 diff --git a/src/test/regress/expected/create_view.out b/src/test/regress/expected/create_view.out
-index f551624afb..57f1e432d4 100644
+index f3f8c7b5a2..3e3e54ff4c 100644
 --- a/src/test/regress/expected/create_view.out
 +++ b/src/test/regress/expected/create_view.out
 @@ -18,7 +18,8 @@ CREATE TABLE real_city (
@@ -700,12 +700,12 @@ index 6ed50fdcfa..caa00a345d 100644
  COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless';
  CREATE FOREIGN DATA WRAPPER postgresql VALIDATOR postgresql_fdw_validator;
 diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
-index 6b8c2f2414..8e13b7fa46 100644
+index 12e523c737..8872e23935 100644
 --- a/src/test/regress/expected/foreign_key.out
 +++ b/src/test/regress/expected/foreign_key.out
-@@ -1985,7 +1985,7 @@ ALTER TABLE fk_partitioned_fk_6 ATTACH PARTITION fk_partitioned_pk_6 FOR VALUES
-ERROR: cannot ALTER TABLE "fk_partitioned_pk_61" because it is being used by active queries in this session
-DROP TABLE fk_partitioned_pk_6, fk_partitioned_fk_6;
+@@ -1968,7 +1968,7 @@ ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2
+FOR VALUES IN (1600);
+-- leave these tables around intentionally
  -- test the case when the referenced table is owned by a different user
 -create role regress_other_partitioned_fk_owner;
 +create role regress_other_partitioned_fk_owner PASSWORD NEON_PASSWORD_PLACEHOLDER;
@@ -713,7 +713,7 @@ index 6b8c2f2414..8e13b7fa46 100644
  set role regress_other_partitioned_fk_owner;
  create table other_partitioned_fk(a int, b int) partition by list (a);
 diff --git a/src/test/regress/expected/generated.out b/src/test/regress/expected/generated.out
-index 5881420388..4ae21aa43c 100644
+index 0f623f7119..b48588a54e 100644
 --- a/src/test/regress/expected/generated.out
 +++ b/src/test/regress/expected/generated.out
 @@ -534,7 +534,7 @@ CREATE TABLE gtest10a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STOR
@@ -762,7 +762,7 @@ index a2036a1597..805d73b9d2 100644
  -- fields, leading to long bucket chains and lots of table expansion.
  -- this is therefore a stress test of the bucket overflow code (unlike
 diff --git a/src/test/regress/expected/identity.out b/src/test/regress/expected/identity.out
-index 1b74958de9..078187b542 100644
+index cc7772349f..98a08eb48d 100644
 --- a/src/test/regress/expected/identity.out
 +++ b/src/test/regress/expected/identity.out
 @@ -520,7 +520,7 @@ ALTER TABLE itest7 ALTER COLUMN a SET GENERATED BY DEFAULT;
@@ -775,10 +775,10 @@ index 1b74958de9..078187b542 100644
  GRANT SELECT, INSERT ON itest8 TO regress_identity_user1;
  SET ROLE regress_identity_user1;
 diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out
-index 8f831c95c3..ec681b52af 100644
+index 4943429e9b..0257f22b15 100644
 --- a/src/test/regress/expected/inherit.out
 +++ b/src/test/regress/expected/inherit.out
-@@ -2636,7 +2636,7 @@ create index on permtest_parent (left(c, 3));
+@@ -2606,7 +2606,7 @@ create index on permtest_parent (left(c, 3));
  insert into permtest_parent
  select 1, 'a', left(fipshash(i::text), 5) from generate_series(0, 100) i;
  analyze permtest_parent;
@@ -1133,7 +1133,7 @@ index 8475231735..1afae5395f 100644
  SELECT rolname, rolpassword
  FROM pg_authid
 diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
-index 5b9dba7b32..cc408dad42 100644
+index fbb0489a4f..2905194e2c 100644
 --- a/src/test/regress/expected/privileges.out
 +++ b/src/test/regress/expected/privileges.out
 @@ -20,19 +20,19 @@ SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3
@@ -1185,7 +1185,7 @@ index 5b9dba7b32..cc408dad42 100644
  GRANT pg_read_all_data TO regress_priv_user6;
  GRANT pg_write_all_data TO regress_priv_user7;
  GRANT pg_read_all_settings TO regress_priv_user8 WITH ADMIN OPTION;
-@@ -212,8 +212,8 @@ REVOKE pg_read_all_settings FROM regress_priv_user8;
+@@ -145,8 +145,8 @@ REVOKE pg_read_all_settings FROM regress_priv_user8;
  DROP USER regress_priv_user10;
  DROP USER regress_priv_user9;
  DROP USER regress_priv_user8;
@@ -1196,7 +1196,7 @@ index 5b9dba7b32..cc408dad42 100644
  ALTER GROUP regress_priv_group1 ADD USER regress_priv_user4;
  GRANT regress_priv_group2 TO regress_priv_user2 GRANTED BY regress_priv_user1;
  SET SESSION AUTHORIZATION regress_priv_user1;
-@@ -239,12 +239,16 @@ GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY regre
+@@ -172,12 +172,16 @@ GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY regre
  ERROR: permission denied to grant privileges as role "regress_priv_role"
  DETAIL: The grantor must have the ADMIN option on role "regress_priv_role".
  GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY CURRENT_ROLE;
@@ -1213,7 +1213,7 @@ index 5b9dba7b32..cc408dad42 100644
  DROP ROLE regress_priv_role;
  SET SESSION AUTHORIZATION regress_priv_user1;
  SELECT session_user, current_user;
-@@ -1776,7 +1780,7 @@ SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OP
+@@ -1709,7 +1713,7 @@ SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OP

  -- security-restricted operations
  \c -
@@ -1222,7 +1222,7 @@ index 5b9dba7b32..cc408dad42 100644
  -- Check that index expressions and predicates are run as the table's owner
  -- A dummy index function checking current_user
  CREATE FUNCTION sro_ifun(int) RETURNS int AS $$
-@@ -2668,8 +2672,8 @@ drop cascades to function testns.priv_testagg(integer)
+@@ -2601,8 +2605,8 @@ drop cascades to function testns.priv_testagg(integer)
  drop cascades to function testns.priv_testproc(integer)
  -- Change owner of the schema & and rename of new schema owner
  \c -
@@ -1233,7 +1233,7 @@ index 5b9dba7b32..cc408dad42 100644
  SET SESSION ROLE regress_schemauser1;
  CREATE SCHEMA testns;
  SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid;
-@@ -2792,7 +2796,7 @@ DROP USER regress_priv_user7;
+@@ -2725,7 +2729,7 @@ DROP USER regress_priv_user7;
  DROP USER regress_priv_user8; -- does not exist
  ERROR: role "regress_priv_user8" does not exist
  -- permissions with LOCK TABLE
@@ -1242,7 +1242,7 @@ index 5b9dba7b32..cc408dad42 100644
  CREATE TABLE lock_table (a int);
  -- LOCK TABLE and SELECT permission
  GRANT SELECT ON lock_table TO regress_locktable_user;
-@@ -2874,7 +2878,7 @@ DROP USER regress_locktable_user;
+@@ -2807,7 +2811,7 @@ DROP USER regress_locktable_user;
  -- pg_backend_memory_contexts.
  -- switch to superuser
  \c -
@@ -1251,7 +1251,7 @@ index 5b9dba7b32..cc408dad42 100644
  SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no
  has_table_privilege
 ---------------------
-@@ -2918,10 +2922,10 @@ RESET ROLE;
+@@ -2851,10 +2855,10 @@ RESET ROLE;
  -- clean up
  DROP ROLE regress_readallstats;
  -- test role grantor machinery
@@ -1266,7 +1266,7 @@ index 5b9dba7b32..cc408dad42 100644
  GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE;
  GRANT regress_group_direct_manager TO regress_group_indirect_manager;
  SET SESSION AUTHORIZATION regress_group_direct_manager;
-@@ -2950,9 +2954,9 @@ DROP ROLE regress_group_direct_manager;
+@@ -2883,9 +2887,9 @@ DROP ROLE regress_group_direct_manager;
  DROP ROLE regress_group_indirect_manager;
  DROP ROLE regress_group_member;
  -- test SET and INHERIT options with object ownership changes
@@ -1813,7 +1813,7 @@ index 5e6969b173..2c4d52237f 100644

  -- clean up roles
 diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
-index 218c0c2863..f7af0cfb12 100644
+index 97ca9bf72c..b2a7a6f710 100644
 --- a/src/test/regress/expected/rowsecurity.out
 +++ b/src/test/regress/expected/rowsecurity.out
 @@ -14,13 +14,13 @@ DROP ROLE IF EXISTS regress_rls_group2;
@@ -1917,19 +1917,6 @@ index b79fe9a1c0..e29fab88ab 100644
  ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user
  REVOKE INSERT ON TABLES FROM regress_selinto_user;
  GRANT ALL ON SCHEMA selinto_schema TO public;
-diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
-index afc6ab08c2..dfcd891af3 100644
---- a/src/test/regress/expected/select_parallel.out
-+++ b/src/test/regress/expected/select_parallel.out
-@@ -1220,7 +1220,7 @@ SELECT 1 FROM tenk1_vw_sec
-
- rollback;
- -- test that function option SET ROLE works in parallel workers.
--create role regress_parallel_worker;
-+create role regress_parallel_worker PASSWORD NEON_PASSWORD_PLACEHOLDER;
- create function set_and_report_role() returns text as
- $$ select current_setting('role') $$ language sql parallel safe
- set role = regress_parallel_worker;
 diff --git a/src/test/regress/expected/select_views.out b/src/test/regress/expected/select_views.out
 index 1aeed8452b..7d9427d070 100644
 --- a/src/test/regress/expected/select_views.out
@@ -2382,7 +2369,7 @@ index 6cb9c926c0..5e689e4062 100644
  ALTER TABLE ptnowner1 OWNER TO regress_ptnowner;
  ALTER TABLE ptnowner OWNER TO regress_ptnowner;
 diff --git a/src/test/regress/sql/collate.icu.utf8.sql b/src/test/regress/sql/collate.icu.utf8.sql
-index 8aa902d5ab..24bb823b86 100644
+index 3db9e25913..c66d5aa2c2 100644
 --- a/src/test/regress/sql/collate.icu.utf8.sql
 +++ b/src/test/regress/sql/collate.icu.utf8.sql
 @@ -353,7 +353,7 @@ reset enable_seqscan;
@@ -2545,7 +2532,7 @@ index 43d2e906dd..6c993d70f0 100644
  -- An earlier bug (see commit b1ecb9b3fcf) could end up using a buffer from
  -- the wrong partition. This test is *not* guaranteed to trigger that bug, but
 diff --git a/src/test/regress/sql/copy2.sql b/src/test/regress/sql/copy2.sql
-index cf3828c16e..cf3ca38175 100644
+index d759635068..d58e50dcc5 100644
 --- a/src/test/regress/sql/copy2.sql
 +++ b/src/test/regress/sql/copy2.sql
 @@ -365,8 +365,8 @@ copy check_con_tbl from stdin;
@@ -2787,7 +2774,7 @@ index 1b7064247a..be5b662ce1 100644
  -- Cases where schema creation fails as objects are qualified with a schema
  -- that does not match with what's expected.
 diff --git a/src/test/regress/sql/create_view.sql b/src/test/regress/sql/create_view.sql
-index ae6841308b..47bc792e30 100644
+index 3a78be1b0c..617d2dc8d6 100644
 --- a/src/test/regress/sql/create_view.sql
 +++ b/src/test/regress/sql/create_view.sql
 @@ -23,7 +23,8 @@ CREATE TABLE real_city (
@@ -2914,11 +2901,11 @@ index aa147b14a9..370e0dd570 100644
  CREATE FOREIGN DATA WRAPPER dummy;
  COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless';
 diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
-index 45c7a534cb..32dd26b8cd 100644
+index 22e177f89b..7138d5e1d4 100644
 --- a/src/test/regress/sql/foreign_key.sql
 +++ b/src/test/regress/sql/foreign_key.sql
-@@ -1435,7 +1435,7 @@ ALTER TABLE fk_partitioned_fk_6 ATTACH PARTITION fk_partitioned_pk_6 FOR VALUES
-DROP TABLE fk_partitioned_pk_6, fk_partitioned_fk_6;
+@@ -1418,7 +1418,7 @@ ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2
+-- leave these tables around intentionally

  -- test the case when the referenced table is owned by a different user
 -create role regress_other_partitioned_fk_owner;
@@ -2976,7 +2963,7 @@ index 527024f710..de49c0b85f 100644
  -- the data in this file has a lot of duplicates in the index key
  -- fields, leading to long bucket chains and lots of table expansion.
 diff --git a/src/test/regress/sql/identity.sql b/src/test/regress/sql/identity.sql
-index 7537258a75..9041e35e34 100644
+index 91d2e443b4..241c93f373 100644
 --- a/src/test/regress/sql/identity.sql
 +++ b/src/test/regress/sql/identity.sql
 @@ -287,7 +287,7 @@ ALTER TABLE itest7 ALTER COLUMN a RESTART;
@@ -2989,10 +2976,10 @@ index 7537258a75..9041e35e34 100644
  GRANT SELECT, INSERT ON itest8 TO regress_identity_user1;
  SET ROLE regress_identity_user1;
 diff --git a/src/test/regress/sql/inherit.sql b/src/test/regress/sql/inherit.sql
-index b5b554a125..109889ad24 100644
+index fe699c54d5..bdd5993f45 100644
 --- a/src/test/regress/sql/inherit.sql
 +++ b/src/test/regress/sql/inherit.sql
-@@ -958,7 +958,7 @@ create index on permtest_parent (left(c, 3));
+@@ -950,7 +950,7 @@ create index on permtest_parent (left(c, 3));
  insert into permtest_parent
  select 1, 'a', left(fipshash(i::text), 5) from generate_series(0, 100) i;
  analyze permtest_parent;
@@ -3231,7 +3218,7 @@ index 53e86b0b6c..f07cf1ec54 100644
  CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';

 diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
-index 249df17a58..b258e7f26a 100644
+index 3f68cafcd1..004b26831d 100644
 --- a/src/test/regress/sql/privileges.sql
 +++ b/src/test/regress/sql/privileges.sql
 @@ -24,18 +24,18 @@ RESET client_min_messages;
@@ -3282,7 +3269,7 @@ index 249df17a58..b258e7f26a 100644

  GRANT pg_read_all_data TO regress_priv_user6;
  GRANT pg_write_all_data TO regress_priv_user7;
-@@ -163,8 +163,8 @@ DROP USER regress_priv_user10;
+@@ -130,8 +130,8 @@ DROP USER regress_priv_user10;
  DROP USER regress_priv_user9;
  DROP USER regress_priv_user8;

@@ -3293,7 +3280,7 @@ index 249df17a58..b258e7f26a 100644

  ALTER GROUP regress_priv_group1 ADD USER regress_priv_user4;

-@@ -1157,7 +1157,7 @@ SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OP
+@@ -1124,7 +1124,7 @@ SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OP

  -- security-restricted operations
  \c -
@@ -3302,7 +3289,7 @@ index 249df17a58..b258e7f26a 100644

  -- Check that index expressions and predicates are run as the table's owner

-@@ -1653,8 +1653,8 @@ DROP SCHEMA testns CASCADE;
+@@ -1620,8 +1620,8 @@ DROP SCHEMA testns CASCADE;
  -- Change owner of the schema & and rename of new schema owner
  \c -

@@ -3313,7 +3300,7 @@ index 249df17a58..b258e7f26a 100644

  SET SESSION ROLE regress_schemauser1;
  CREATE SCHEMA testns;
@@ -1748,7 +1748,7 @@ DROP USER regress_priv_user8; -- does not exist
|
@@ -1715,7 +1715,7 @@ DROP USER regress_priv_user8; -- does not exist
|
||||||
|
|
||||||
|
|
||||||
-- permissions with LOCK TABLE
|
-- permissions with LOCK TABLE
|
||||||
@@ -3322,7 +3309,7 @@ index 249df17a58..b258e7f26a 100644
|
|||||||
CREATE TABLE lock_table (a int);
|
CREATE TABLE lock_table (a int);
|
||||||
|
|
||||||
-- LOCK TABLE and SELECT permission
|
-- LOCK TABLE and SELECT permission
|
||||||
@@ -1836,7 +1836,7 @@ DROP USER regress_locktable_user;
|
@@ -1803,7 +1803,7 @@ DROP USER regress_locktable_user;
|
||||||
-- switch to superuser
|
-- switch to superuser
|
||||||
\c -
|
\c -
|
||||||
|
|
||||||
@@ -3331,7 +3318,7 @@ index 249df17a58..b258e7f26a 100644
|
|||||||
|
|
||||||
SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no
|
SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no
|
||||||
SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- no
|
SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- no
|
||||||
@@ -1856,10 +1856,10 @@ RESET ROLE;
|
@@ -1823,10 +1823,10 @@ RESET ROLE;
|
||||||
DROP ROLE regress_readallstats;
|
DROP ROLE regress_readallstats;
|
||||||
|
|
||||||
-- test role grantor machinery
|
-- test role grantor machinery
|
||||||
@@ -3346,7 +3333,7 @@ index 249df17a58..b258e7f26a 100644
|
|||||||
|
|
||||||
GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE;
|
GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE;
|
||||||
GRANT regress_group_direct_manager TO regress_group_indirect_manager;
|
GRANT regress_group_direct_manager TO regress_group_indirect_manager;
|
||||||
@@ -1881,9 +1881,9 @@ DROP ROLE regress_group_indirect_manager;
|
@@ -1848,9 +1848,9 @@ DROP ROLE regress_group_indirect_manager;
|
||||||
DROP ROLE regress_group_member;
|
DROP ROLE regress_group_member;
|
||||||
|
|
||||||
-- test SET and INHERIT options with object ownership changes
|
-- test SET and INHERIT options with object ownership changes
|
||||||
@@ -3638,7 +3625,7 @@ index c961b2d730..0859b89c4f 100644
|
|||||||
-- clean up roles
|
-- clean up roles
|
||||||
DROP ROLE regress_test_def_superuser;
|
DROP ROLE regress_test_def_superuser;
|
||||||
diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql
|
diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql
|
||||||
index d3bfd53e23..919ce1d0c6 100644
|
index dec7340538..cdbc03a5cc 100644
|
||||||
--- a/src/test/regress/sql/rowsecurity.sql
|
--- a/src/test/regress/sql/rowsecurity.sql
|
||||||
+++ b/src/test/regress/sql/rowsecurity.sql
|
+++ b/src/test/regress/sql/rowsecurity.sql
|
||||||
@@ -20,13 +20,13 @@ DROP SCHEMA IF EXISTS regress_rls_schema CASCADE;
|
@@ -20,13 +20,13 @@ DROP SCHEMA IF EXISTS regress_rls_schema CASCADE;
|
||||||
@@ -3714,19 +3701,6 @@ index 689c448cc2..223ceb1d75 100644
|
|||||||
ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user
|
ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user
|
||||||
REVOKE INSERT ON TABLES FROM regress_selinto_user;
|
REVOKE INSERT ON TABLES FROM regress_selinto_user;
|
||||||
GRANT ALL ON SCHEMA selinto_schema TO public;
|
GRANT ALL ON SCHEMA selinto_schema TO public;
|
||||||
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
|
|
||||||
index 33d78e16dc..cb193c9b27 100644
|
|
||||||
--- a/src/test/regress/sql/select_parallel.sql
|
|
||||||
+++ b/src/test/regress/sql/select_parallel.sql
|
|
||||||
@@ -464,7 +464,7 @@ SELECT 1 FROM tenk1_vw_sec
|
|
||||||
rollback;
|
|
||||||
|
|
||||||
-- test that function option SET ROLE works in parallel workers.
|
|
||||||
-create role regress_parallel_worker;
|
|
||||||
+create role regress_parallel_worker PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
|
||||||
|
|
||||||
create function set_and_report_role() returns text as
|
|
||||||
$$ select current_setting('role') $$ language sql parallel safe
|
|
||||||
diff --git a/src/test/regress/sql/select_views.sql b/src/test/regress/sql/select_views.sql
|
diff --git a/src/test/regress/sql/select_views.sql b/src/test/regress/sql/select_views.sql
|
||||||
index e742f13699..7bd0255df8 100644
|
index e742f13699..7bd0255df8 100644
|
||||||
--- a/src/test/regress/sql/select_views.sql
|
--- a/src/test/regress/sql/select_views.sql
|
||||||
|
|||||||
@@ -1,174 +0,0 @@
|
|||||||
diff --git a/expected/ut-A.out b/expected/ut-A.out
|
|
||||||
index e7d68a1..65a056c 100644
|
|
||||||
--- a/expected/ut-A.out
|
|
||||||
+++ b/expected/ut-A.out
|
|
||||||
@@ -9,13 +9,16 @@ SET search_path TO public;
|
|
||||||
----
|
|
||||||
-- No.A-1-1-3
|
|
||||||
CREATE EXTENSION pg_hint_plan;
|
|
||||||
+LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan
|
|
||||||
-- No.A-1-2-3
|
|
||||||
DROP EXTENSION pg_hint_plan;
|
|
||||||
-- No.A-1-1-4
|
|
||||||
CREATE SCHEMA other_schema;
|
|
||||||
CREATE EXTENSION pg_hint_plan SCHEMA other_schema;
|
|
||||||
+LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan
|
|
||||||
ERROR: extension "pg_hint_plan" must be installed in schema "hint_plan"
|
|
||||||
CREATE EXTENSION pg_hint_plan;
|
|
||||||
+LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan
|
|
||||||
DROP SCHEMA other_schema;
|
|
||||||
----
|
|
||||||
---- No. A-5-1 comment pattern
|
|
||||||
diff --git a/expected/ut-J.out b/expected/ut-J.out
|
|
||||||
index 2fa3c70..314e929 100644
|
|
||||||
--- a/expected/ut-J.out
|
|
||||||
+++ b/expected/ut-J.out
|
|
||||||
@@ -789,38 +789,6 @@ NestLoop(st1 st2)
|
|
||||||
MergeJoin(t1 t2)
|
|
||||||
not used hint:
|
|
||||||
duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-NestLoop(st1 st2)
|
|
||||||
-MergeJoin(t1 t2)
|
|
||||||
-duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-NestLoop(st1 st2)
|
|
||||||
-MergeJoin(t1 t2)
|
|
||||||
-duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-NestLoop(st1 st2)
|
|
||||||
-MergeJoin(t1 t2)
|
|
||||||
-duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-NestLoop(st1 st2)
|
|
||||||
-MergeJoin(t1 t2)
|
|
||||||
-duplication hint:
|
|
||||||
error hint:
|
|
||||||
|
|
||||||
explain_filter
|
|
||||||
diff --git a/expected/ut-S.out b/expected/ut-S.out
|
|
||||||
index 0bfcfb8..e75f581 100644
|
|
||||||
--- a/expected/ut-S.out
|
|
||||||
+++ b/expected/ut-S.out
|
|
||||||
@@ -4415,34 +4415,6 @@ used hint:
|
|
||||||
IndexScan(ti1 ti1_pred)
|
|
||||||
not used hint:
|
|
||||||
duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-IndexScan(ti1 ti1_pred)
|
|
||||||
-duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-IndexScan(ti1 ti1_pred)
|
|
||||||
-duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-IndexScan(ti1 ti1_pred)
|
|
||||||
-duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-IndexScan(ti1 ti1_pred)
|
|
||||||
-duplication hint:
|
|
||||||
error hint:
|
|
||||||
|
|
||||||
explain_filter
|
|
||||||
diff --git a/expected/ut-W.out b/expected/ut-W.out
|
|
||||||
index a09bd34..0ad227c 100644
|
|
||||||
--- a/expected/ut-W.out
|
|
||||||
+++ b/expected/ut-W.out
|
|
||||||
@@ -1341,54 +1341,6 @@ IndexScan(ft1)
|
|
||||||
IndexScan(t)
|
|
||||||
Parallel(s1 3 hard)
|
|
||||||
duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-IndexScan(*VALUES*)
|
|
||||||
-SeqScan(cte1)
|
|
||||||
-IndexScan(ft1)
|
|
||||||
-IndexScan(t)
|
|
||||||
-Parallel(p1 5 hard)
|
|
||||||
-Parallel(s1 3 hard)
|
|
||||||
-duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-IndexScan(*VALUES*)
|
|
||||||
-SeqScan(cte1)
|
|
||||||
-IndexScan(ft1)
|
|
||||||
-IndexScan(t)
|
|
||||||
-Parallel(p1 5 hard)
|
|
||||||
-Parallel(s1 3 hard)
|
|
||||||
-duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-IndexScan(*VALUES*)
|
|
||||||
-SeqScan(cte1)
|
|
||||||
-IndexScan(ft1)
|
|
||||||
-IndexScan(t)
|
|
||||||
-Parallel(p1 5 hard)
|
|
||||||
-Parallel(s1 3 hard)
|
|
||||||
-duplication hint:
|
|
||||||
-error hint:
|
|
||||||
-
|
|
||||||
-LOG: pg_hint_plan:
|
|
||||||
-used hint:
|
|
||||||
-not used hint:
|
|
||||||
-IndexScan(*VALUES*)
|
|
||||||
-SeqScan(cte1)
|
|
||||||
-IndexScan(ft1)
|
|
||||||
-IndexScan(t)
|
|
||||||
-Parallel(p1 5 hard)
|
|
||||||
-Parallel(s1 3 hard)
|
|
||||||
-duplication hint:
|
|
||||||
error hint:
|
|
||||||
|
|
||||||
explain_filter
|
|
||||||
diff --git a/expected/ut-fdw.out b/expected/ut-fdw.out
|
|
||||||
index 017fa4b..98d989b 100644
|
|
||||||
--- a/expected/ut-fdw.out
|
|
||||||
+++ b/expected/ut-fdw.out
|
|
||||||
@@ -7,6 +7,7 @@ SET pg_hint_plan.debug_print TO on;
|
|
||||||
SET client_min_messages TO LOG;
|
|
||||||
SET pg_hint_plan.enable_hint TO on;
|
|
||||||
CREATE EXTENSION file_fdw;
|
|
||||||
+LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/file_fdw
|
|
||||||
CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw;
|
|
||||||
CREATE USER MAPPING FOR PUBLIC SERVER file_server;
|
|
||||||
CREATE FOREIGN TABLE ft1 (id int, val int) SERVER file_server OPTIONS (format 'csv', filename :'filename');
|
|
||||||
@@ -1,42 +0,0 @@
commit 46b38d3e46f9cd6c70d9b189dd6ff4abaa17cf5e
Author: Alexander Bayandin <alexander@neon.tech>
Date: Sat Nov 30 18:29:32 2024 +0000

Fix v8 9.7.37 compilation on Debian 12

diff --git a/patches/code/84cf3230a9680aac3b73c410c2b758760b6d3066.patch b/patches/code/84cf3230a9680aac3b73c410c2b758760b6d3066.patch
new file mode 100644
index 0000000..f0a5dc7
--- /dev/null
+++ b/patches/code/84cf3230a9680aac3b73c410c2b758760b6d3066.patch
@@ -0,0 +1,30 @@
+From 84cf3230a9680aac3b73c410c2b758760b6d3066 Mon Sep 17 00:00:00 2001
+From: Michael Lippautz <mlippautz@chromium.org>
+Date: Thu, 27 Jan 2022 14:14:11 +0100
+Subject: [PATCH] cppgc: Fix include
+
+Add <utility> to cover for std::exchange.
+
+Bug: v8:12585
+Change-Id: Ida65144e93e466be8914527d0e646f348c136bcb
+Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3420309
+Auto-Submit: Michael Lippautz <mlippautz@chromium.org>
+Reviewed-by: Omer Katz <omerkatz@chromium.org>
+Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
+Cr-Commit-Position: refs/heads/main@{#78820}
+---
+ src/heap/cppgc/prefinalizer-handler.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/heap/cppgc/prefinalizer-handler.h b/src/heap/cppgc/prefinalizer-handler.h
+index bc17c99b1838..c82c91ff5a45 100644
+--- a/src/heap/cppgc/prefinalizer-handler.h
++++ b/src/heap/cppgc/prefinalizer-handler.h
+@@ -5,6 +5,7 @@
+ #ifndef V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+ #define V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+
++#include <utility>
+ #include <vector>
+
+ #include "include/cppgc/prefinalizer.h"
@@ -10,10 +10,6 @@ default = []
testing = []

[dependencies]
base64.workspace = true
aws-config.workspace = true
aws-sdk-s3.workspace = true
aws-sdk-kms.workspace = true
anyhow.workspace = true
camino.workspace = true
chrono.workspace = true
@@ -31,8 +27,6 @@ opentelemetry.workspace = true
opentelemetry_sdk.workspace = true
postgres.workspace = true
regex.workspace = true
serde.workspace = true
serde_with.workspace = true
serde_json.workspace = true
signal-hook.workspace = true
tar.workspace = true
@@ -49,7 +43,6 @@ thiserror.workspace = true
url.workspace = true
prometheus.workspace = true

postgres_initdb.workspace = true
compute_api.workspace = true
utils.workspace = true
workspace_hack.workspace = true
@@ -37,7 +37,6 @@ use std::collections::HashMap;
use std::fs::File;
use std::path::Path;
use std::process::exit;
use std::str::FromStr;
use std::sync::atomic::Ordering;
use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock};
use std::{thread, time::Duration};
@@ -59,7 +58,7 @@ use compute_tools::compute::{
forward_termination_signal, ComputeNode, ComputeState, ParsedSpec, PG_PID,
};
use compute_tools::configurator::launch_configurator;
use compute_tools::extension_server::get_pg_version_string;
use compute_tools::extension_server::get_pg_version;
use compute_tools::http::api::launch_http_server;
use compute_tools::logger::*;
use compute_tools::monitor::launch_monitor;
@@ -106,11 +105,6 @@ fn main() -> Result<()> {
fn init() -> Result<(String, clap::ArgMatches)> {
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;

opentelemetry::global::set_error_handler(|err| {
tracing::info!("OpenTelemetry error: {err}");
})
.expect("global error handler lock poisoned");

let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
thread::spawn(move || {
for sig in signals.forever() {
@@ -323,19 +317,11 @@ fn wait_spec(
} else {
spec_set = false;
}
let connstr = Url::parse(connstr).context("cannot parse connstr as a URL")?;
let conn_conf = postgres::config::Config::from_str(connstr.as_str())
.context("cannot build postgres config from connstr")?;
let tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr.as_str())
.context("cannot build tokio postgres config from connstr")?;
let compute_node = ComputeNode {
connstr,
connstr: Url::parse(connstr).context("cannot parse connstr as a URL")?,
conn_conf,
tokio_conn_conf,
pgdata: pgdata.to_string(),
pgbin: pgbin.to_string(),
pgversion: get_pg_version_string(pgbin),
pgversion: get_pg_version(pgbin),
http_port,
live_config_allowed,
state: Mutex::new(new_state),
state_changed: Condvar::new(),
@@ -390,6 +376,7 @@ fn wait_spec(

Ok(WaitSpecResult {
compute,
http_port,
resize_swap_on_bind,
set_disk_quota_for_fs: set_disk_quota_for_fs.cloned(),
})
@@ -397,6 +384,8 @@ fn wait_spec(

struct WaitSpecResult {
compute: Arc<ComputeNode>,
// passed through from ProcessCliResult
http_port: u16,
resize_swap_on_bind: bool,
set_disk_quota_for_fs: Option<String>,
}
@@ -406,6 +395,7 @@ fn start_postgres(
#[allow(unused_variables)] matches: &clap::ArgMatches,
WaitSpecResult {
compute,
http_port,
resize_swap_on_bind,
set_disk_quota_for_fs,
}: WaitSpecResult,
@@ -478,10 +468,12 @@ fn start_postgres(
}
}

let extension_server_port: u16 = http_port;

// Start Postgres
let mut pg = None;
if !prestartup_failed {
pg = match compute.start_compute() {
pg = match compute.start_compute(extension_server_port) {
Ok(pg) => Some(pg),
Err(err) => {
error!("could not start the compute node: {:#}", err);
@@ -1,346 +0,0 @@
|
|||||||
//! This program dumps a remote Postgres database into a local Postgres database
|
|
||||||
//! and uploads the resulting PGDATA into object storage for import into a Timeline.
|
|
||||||
//!
|
|
||||||
//! # Context, Architecture, Design
|
|
||||||
//!
|
|
||||||
//! See cloud.git Fast Imports RFC (<https://github.com/neondatabase/cloud/pull/19799>)
|
|
||||||
//! for the full picture.
|
|
||||||
//! The RFC describing the storage pieces of importing the PGDATA dump into a Timeline
|
|
||||||
//! is publicly accessible at <https://github.com/neondatabase/neon/pull/9538>.
|
|
||||||
//!
|
|
||||||
//! # This is a Prototype!
|
|
||||||
//!
|
|
||||||
//! This program is part of a prototype feature and not yet used in production.
|
|
||||||
//!
|
|
||||||
//! The cloud.git RFC contains lots of suggestions for improving e2e throughput
|
|
||||||
//! of this step of the timeline import process.
|
|
||||||
//!
|
|
||||||
//! # Local Testing
|
|
||||||
//!
|
|
||||||
//! - Comment out most of the pgxns in The Dockerfile.compute-tools to speed up the build.
|
|
||||||
//! - Build the image with the following command:
|
|
||||||
//!
|
|
||||||
//! ```bash
|
|
||||||
//! docker buildx build --platform linux/amd64 --build-arg DEBIAN_VERSION=bullseye --build-arg GIT_VERSION=local --build-arg PG_VERSION=v14 --build-arg BUILD_TAG="$(date --iso-8601=s -u)" -t localhost:3030/localregistry/compute-node-v14:latest -f compute/compute-node.Dockerfile .
|
|
||||||
//! docker push localhost:3030/localregistry/compute-node-v14:latest
|
|
||||||
//! ```
|
|
||||||
|
|
||||||
use anyhow::Context;
|
|
||||||
use aws_config::BehaviorVersion;
|
|
||||||
use camino::{Utf8Path, Utf8PathBuf};
|
|
||||||
use clap::Parser;
|
|
||||||
use compute_tools::extension_server::{get_pg_version, PostgresMajorVersion};
|
|
||||||
use nix::unistd::Pid;
|
|
||||||
use tracing::{info, info_span, warn, Instrument};
|
|
||||||
use utils::fs_ext::is_directory_empty;
|
|
||||||
|
|
||||||
#[path = "fast_import/child_stdio_to_log.rs"]
|
|
||||||
mod child_stdio_to_log;
|
|
||||||
#[path = "fast_import/s3_uri.rs"]
|
|
||||||
mod s3_uri;
|
|
||||||
#[path = "fast_import/s5cmd.rs"]
|
|
||||||
mod s5cmd;
|
|
||||||
|
|
||||||
#[derive(clap::Parser)]
|
|
||||||
struct Args {
|
|
||||||
#[clap(long)]
|
|
||||||
working_directory: Utf8PathBuf,
|
|
||||||
#[clap(long, env = "NEON_IMPORTER_S3_PREFIX")]
|
|
||||||
s3_prefix: s3_uri::S3Uri,
|
|
||||||
#[clap(long)]
|
|
||||||
pg_bin_dir: Utf8PathBuf,
|
|
||||||
#[clap(long)]
|
|
||||||
pg_lib_dir: Utf8PathBuf,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[serde_with::serde_as]
|
|
||||||
#[derive(serde::Deserialize)]
|
|
||||||
struct Spec {
|
|
||||||
encryption_secret: EncryptionSecret,
|
|
||||||
#[serde_as(as = "serde_with::base64::Base64")]
|
|
||||||
source_connstring_ciphertext_base64: Vec<u8>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(serde::Deserialize)]
|
|
||||||
enum EncryptionSecret {
|
|
||||||
#[allow(clippy::upper_case_acronyms)]
|
|
||||||
KMS { key_id: String },
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
pub(crate) async fn main() -> anyhow::Result<()> {
|
|
||||||
utils::logging::init(
|
|
||||||
utils::logging::LogFormat::Plain,
|
|
||||||
utils::logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
|
|
||||||
utils::logging::Output::Stdout,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
info!("starting");
|
|
||||||
|
|
||||||
let Args {
|
|
||||||
working_directory,
|
|
||||||
s3_prefix,
|
|
||||||
pg_bin_dir,
|
|
||||||
pg_lib_dir,
|
|
||||||
} = Args::parse();
|
|
||||||
|
|
||||||
let aws_config = aws_config::load_defaults(BehaviorVersion::v2024_03_28()).await;
|
|
||||||
|
|
||||||
let spec: Spec = {
|
|
||||||
let spec_key = s3_prefix.append("/spec.json");
|
|
||||||
let s3_client = aws_sdk_s3::Client::new(&aws_config);
|
|
||||||
let object = s3_client
|
|
||||||
.get_object()
|
|
||||||
.bucket(&spec_key.bucket)
|
|
||||||
.key(spec_key.key)
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.context("get spec from s3")?
|
|
||||||
.body
|
|
||||||
.collect()
|
|
||||||
.await
|
|
||||||
.context("download spec body")?;
|
|
||||||
serde_json::from_slice(&object.into_bytes()).context("parse spec as json")?
|
|
||||||
};
|
|
||||||
|
|
||||||
match tokio::fs::create_dir(&working_directory).await {
|
|
||||||
Ok(()) => {}
|
|
||||||
Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
|
|
||||||
if !is_directory_empty(&working_directory)
|
|
||||||
.await
|
|
||||||
.context("check if working directory is empty")?
|
|
||||||
{
|
|
||||||
anyhow::bail!("working directory is not empty");
|
|
||||||
} else {
|
|
||||||
// ok
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => return Err(anyhow::Error::new(e).context("create working directory")),
|
|
||||||
}
|
|
||||||
|
|
||||||
let pgdata_dir = working_directory.join("pgdata");
|
|
||||||
tokio::fs::create_dir(&pgdata_dir)
|
|
||||||
.await
|
|
||||||
.context("create pgdata directory")?;
|
|
||||||
|
|
||||||
//
|
|
||||||
// Setup clients
|
|
||||||
//
|
|
||||||
let aws_config = aws_config::load_defaults(BehaviorVersion::v2024_03_28()).await;
|
|
||||||
let kms_client = aws_sdk_kms::Client::new(&aws_config);
|
|
||||||
|
|
||||||
//
|
|
||||||
// Initialize pgdata
|
|
||||||
//
|
|
||||||
let pgbin = pg_bin_dir.join("postgres");
|
|
||||||
let pg_version = match get_pg_version(pgbin.as_ref()) {
|
|
||||||
PostgresMajorVersion::V14 => 14,
|
|
||||||
PostgresMajorVersion::V15 => 15,
|
|
||||||
PostgresMajorVersion::V16 => 16,
|
|
||||||
PostgresMajorVersion::V17 => 17,
|
|
||||||
};
|
|
||||||
let superuser = "cloud_admin"; // XXX: this shouldn't be hard-coded
|
|
||||||
postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs {
|
|
||||||
superuser,
|
|
||||||
locale: "en_US.UTF-8", // XXX: this shouldn't be hard-coded,
|
|
||||||
pg_version,
|
|
||||||
initdb_bin: pg_bin_dir.join("initdb").as_ref(),
|
|
||||||
library_search_path: &pg_lib_dir, // TODO: is this right? Prob works in compute image, not sure about neon_local.
|
|
||||||
pgdata: &pgdata_dir,
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.context("initdb")?;
|
|
||||||
|
|
||||||
let nproc = num_cpus::get();
|
|
||||||
|
|
||||||
//
|
|
||||||
// Launch postgres process
|
|
||||||
//
|
|
||||||
let mut postgres_proc = tokio::process::Command::new(pgbin)
|
|
||||||
.arg("-D")
|
|
||||||
.arg(&pgdata_dir)
|
|
||||||
.args(["-c", "wal_level=minimal"])
|
|
||||||
.args(["-c", "shared_buffers=10GB"])
|
|
||||||
.args(["-c", "max_wal_senders=0"])
|
|
||||||
.args(["-c", "fsync=off"])
|
|
||||||
.args(["-c", "full_page_writes=off"])
|
|
||||||
.args(["-c", "synchronous_commit=off"])
|
|
||||||
.args(["-c", "maintenance_work_mem=8388608"])
|
|
||||||
.args(["-c", &format!("max_parallel_maintenance_workers={nproc}")])
|
|
||||||
.args(["-c", &format!("max_parallel_workers={nproc}")])
|
|
||||||
.args(["-c", &format!("max_parallel_workers_per_gather={nproc}")])
|
|
||||||
.args(["-c", &format!("max_worker_processes={nproc}")])
|
|
||||||
.args(["-c", "effective_io_concurrency=100"])
|
|
||||||
.env_clear()
|
|
||||||
.stdout(std::process::Stdio::piped())
|
|
||||||
.stderr(std::process::Stdio::piped())
|
|
||||||
.spawn()
|
|
||||||
.context("spawn postgres")?;
|
|
||||||
|
|
||||||
info!("spawned postgres, waiting for it to become ready");
|
|
||||||
tokio::spawn(
|
|
||||||
child_stdio_to_log::relay_process_output(
|
|
||||||
postgres_proc.stdout.take(),
|
|
||||||
postgres_proc.stderr.take(),
|
|
||||||
)
|
|
||||||
.instrument(info_span!("postgres")),
|
|
||||||
);
|
|
||||||
let restore_pg_connstring =
|
|
||||||
format!("host=localhost port=5432 user={superuser} dbname=postgres");
|
|
||||||
loop {
|
|
||||||
let res = tokio_postgres::connect(&restore_pg_connstring, tokio_postgres::NoTls).await;
|
|
||||||
if res.is_ok() {
|
|
||||||
info!("postgres is ready, could connect to it");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// Decrypt connection string
|
|
||||||
//
|
|
||||||
let source_connection_string = {
|
|
||||||
match spec.encryption_secret {
|
|
||||||
EncryptionSecret::KMS { key_id } => {
|
|
||||||
let mut output = kms_client
|
|
||||||
.decrypt()
|
|
||||||
.key_id(key_id)
|
|
||||||
.ciphertext_blob(aws_sdk_s3::primitives::Blob::new(
|
|
||||||
spec.source_connstring_ciphertext_base64,
|
|
||||||
))
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.context("decrypt source connection string")?;
|
|
||||||
let plaintext = output
|
|
||||||
.plaintext
|
|
||||||
.take()
|
|
||||||
.context("get plaintext source connection string")?;
|
|
||||||
String::from_utf8(plaintext.into_inner())
|
|
||||||
.context("parse source connection string as utf8")?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
//
|
|
||||||
// Start the work
|
|
||||||
//
|
|
||||||
|
|
||||||
let dumpdir = working_directory.join("dumpdir");
|
|
||||||
|
|
||||||
let common_args = [
|
|
||||||
// schema mapping (prob suffices to specify them on one side)
|
|
||||||
"--no-owner".to_string(),
|
|
||||||
"--no-privileges".to_string(),
|
|
||||||
"--no-publications".to_string(),
|
|
||||||
"--no-security-labels".to_string(),
|
|
||||||
"--no-subscriptions".to_string(),
|
|
||||||
"--no-tablespaces".to_string(),
|
|
||||||
// format
|
|
||||||
"--format".to_string(),
|
|
||||||
"directory".to_string(),
|
|
||||||
// concurrency
|
|
||||||
"--jobs".to_string(),
|
|
||||||
num_cpus::get().to_string(),
|
|
||||||
// progress updates
|
|
||||||
"--verbose".to_string(),
|
|
||||||
];
|
|
||||||
|
|
||||||
info!("dump into the working directory");
|
|
||||||
{
|
|
||||||
let mut pg_dump = tokio::process::Command::new(pg_bin_dir.join("pg_dump"))
|
|
||||||
.args(&common_args)
|
|
||||||
.arg("-f")
|
|
||||||
.arg(&dumpdir)
|
|
||||||
.arg("--no-sync")
|
|
||||||
// POSITIONAL args
|
|
||||||
// source db (db name included in connection string)
|
|
||||||
.arg(&source_connection_string)
|
|
||||||
// how we run it
|
|
||||||
.env_clear()
|
|
||||||
.kill_on_drop(true)
|
|
||||||
.stdout(std::process::Stdio::piped())
|
|
||||||
.stderr(std::process::Stdio::piped())
|
|
||||||
.spawn()
|
|
||||||
.context("spawn pg_dump")?;
|
|
||||||
|
|
||||||
info!(pid=%pg_dump.id().unwrap(), "spawned pg_dump");
|
|
||||||
|
|
||||||
tokio::spawn(
|
|
||||||
child_stdio_to_log::relay_process_output(pg_dump.stdout.take(), pg_dump.stderr.take())
|
|
||||||
.instrument(info_span!("pg_dump")),
|
|
||||||
);
|
|
||||||
|
|
||||||
let st = pg_dump.wait().await.context("wait for pg_dump")?;
|
|
||||||
info!(status=?st, "pg_dump exited");
|
|
||||||
if !st.success() {
|
|
||||||
warn!(status=%st, "pg_dump failed, restore will likely fail as well");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: do it in a streaming way, plenty of internal research done on this already
|
|
||||||
// TODO: do the unlogged table trick
|
|
||||||
|
|
||||||
info!("restore from working directory into vanilla postgres");
|
|
||||||
{
|
|
||||||
let mut pg_restore = tokio::process::Command::new(pg_bin_dir.join("pg_restore"))
|
|
||||||
.args(&common_args)
|
|
||||||
.arg("-d")
|
|
||||||
.arg(&restore_pg_connstring)
|
|
||||||
// POSITIONAL args
|
|
||||||
.arg(&dumpdir)
|
|
||||||
// how we run it
|
|
||||||
.env_clear()
|
|
||||||
.kill_on_drop(true)
|
|
||||||
.stdout(std::process::Stdio::piped())
|
|
||||||
.stderr(std::process::Stdio::piped())
|
|
||||||
.spawn()
|
|
||||||
.context("spawn pg_restore")?;
|
|
||||||
|
|
||||||
info!(pid=%pg_restore.id().unwrap(), "spawned pg_restore");
|
|
||||||
tokio::spawn(
|
|
||||||
child_stdio_to_log::relay_process_output(
|
|
||||||
pg_restore.stdout.take(),
|
|
||||||
pg_restore.stderr.take(),
|
|
||||||
)
|
|
||||||
.instrument(info_span!("pg_restore")),
|
|
||||||
);
|
|
||||||
let st = pg_restore.wait().await.context("wait for pg_restore")?;
|
|
||||||
info!(status=?st, "pg_restore exited");
|
|
||||||
if !st.success() {
|
|
||||||
warn!(status=%st, "pg_restore failed, restore will likely fail as well");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("shutdown postgres");
|
|
||||||
{
|
|
||||||
nix::sys::signal::kill(
|
|
||||||
Pid::from_raw(
|
|
||||||
i32::try_from(postgres_proc.id().unwrap()).expect("convert child pid to i32"),
|
|
||||||
),
|
|
||||||
nix::sys::signal::SIGTERM,
|
|
||||||
)
|
|
||||||
.context("signal postgres to shut down")?;
|
|
||||||
postgres_proc
|
|
||||||
.wait()
|
|
||||||
.await
|
|
||||||
.context("wait for postgres to shut down")?;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("upload pgdata");
|
|
||||||
s5cmd::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/"))
|
|
||||||
.await
|
|
||||||
.context("sync dump directory to destination")?;
|
|
||||||
|
|
||||||
info!("write status");
|
|
||||||
{
|
|
||||||
let status_dir = working_directory.join("status");
|
|
||||||
std::fs::create_dir(&status_dir).context("create status directory")?;
|
|
||||||
let status_file = status_dir.join("status");
|
|
||||||
std::fs::write(&status_file, serde_json::json!({"done": true}).to_string())
|
|
||||||
.context("write status file")?;
|
|
||||||
s5cmd::sync(&status_file, &s3_prefix.append("/status/pgdata"))
|
|
||||||
.await
|
|
||||||
.context("sync status directory to destination")?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
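// Note on the spec.json consumed above (not part of the diff): given the Spec
// and EncryptionSecret types shown earlier and serde's default externally
// tagged enum encoding, the object fetched from <s3_prefix>/spec.json is
// expected to look roughly like the following; the key id and ciphertext are
// placeholders, not real values.
//
// {
//   "encryption_secret": { "KMS": { "key_id": "<kms key id>" } },
//   "source_connstring_ciphertext_base64": "<base64-encoded ciphertext>"
// }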
@@ -1,35 +0,0 @@
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::{ChildStderr, ChildStdout};
use tracing::info;

/// Asynchronously relays the output from a child process's `stdout` and `stderr` to the tracing log.
/// Each line is read and logged individually, with lossy UTF-8 conversion.
///
/// # Arguments
///
/// * `stdout`: An `Option<ChildStdout>` from the child process.
/// * `stderr`: An `Option<ChildStderr>` from the child process.
///
pub(crate) async fn relay_process_output(stdout: Option<ChildStdout>, stderr: Option<ChildStderr>) {
    let stdout_fut = async {
        if let Some(stdout) = stdout {
            let reader = BufReader::new(stdout);
            let mut lines = reader.lines();
            while let Ok(Some(line)) = lines.next_line().await {
                info!(fd = "stdout", "{}", line);
            }
        }
    };

    let stderr_fut = async {
        if let Some(stderr) = stderr {
            let reader = BufReader::new(stderr);
            let mut lines = reader.lines();
            while let Ok(Some(line)) = lines.next_line().await {
                info!(fd = "stderr", "{}", line);
            }
        }
    };

    tokio::join!(stdout_fut, stderr_fut);
}
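// Illustrative usage sketch (not part of the diff above): spawning a child
// process with piped stdio and forwarding its output through
// relay_process_output. The command and argument here are hypothetical.
async fn run_and_log() -> anyhow::Result<()> {
    let mut child = tokio::process::Command::new("pg_dump")
        .arg("--version")
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        .spawn()?;
    // Take the pipes and relay them to the tracing log while waiting for exit.
    let relay = relay_process_output(child.stdout.take(), child.stderr.take());
    let (status, ()) = tokio::join!(child.wait(), relay);
    tracing::info!(?status, "child exited");
    Ok(())
}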
@@ -1,75 +0,0 @@
use anyhow::Result;
use std::str::FromStr;

/// Struct to hold parsed S3 components
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct S3Uri {
    pub bucket: String,
    pub key: String,
}

impl FromStr for S3Uri {
    type Err = anyhow::Error;

    /// Parse an S3 URI into a bucket and key
    fn from_str(uri: &str) -> Result<Self> {
        // Ensure the URI starts with "s3://"
        if !uri.starts_with("s3://") {
            return Err(anyhow::anyhow!("Invalid S3 URI scheme"));
        }

        // Remove the "s3://" prefix
        let stripped_uri = &uri[5..];

        // Split the remaining string into bucket and key parts
        if let Some((bucket, key)) = stripped_uri.split_once('/') {
            Ok(S3Uri {
                bucket: bucket.to_string(),
                key: key.to_string(),
            })
        } else {
            Err(anyhow::anyhow!(
                "Invalid S3 URI format, missing bucket or key"
            ))
        }
    }
}

impl S3Uri {
    pub fn append(&self, suffix: &str) -> Self {
        Self {
            bucket: self.bucket.clone(),
            key: format!("{}{}", self.key, suffix),
        }
    }
}

impl std::fmt::Display for S3Uri {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "s3://{}/{}", self.bucket, self.key)
    }
}

impl clap::builder::TypedValueParser for S3Uri {
    type Value = Self;

    fn parse_ref(
        &self,
        _cmd: &clap::Command,
        _arg: Option<&clap::Arg>,
        value: &std::ffi::OsStr,
    ) -> Result<Self::Value, clap::Error> {
        let value_str = value.to_str().ok_or_else(|| {
            clap::Error::raw(
                clap::error::ErrorKind::InvalidUtf8,
                "Invalid UTF-8 sequence",
            )
        })?;
        S3Uri::from_str(value_str).map_err(|e| {
            clap::Error::raw(
                clap::error::ErrorKind::InvalidValue,
                format!("Failed to parse S3 URI: {}", e),
            )
        })
    }
}
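// Illustrative usage sketch (not part of the diff above), assuming the S3Uri
// type shown here: parse an S3 URI, derive a sub-key with append(), and rely
// on Display to turn it back into an "s3://bucket/key" string. The bucket and
// key names are made up.
fn s3_uri_example() -> anyhow::Result<()> {
    use std::str::FromStr;
    let prefix = S3Uri::from_str("s3://my-bucket/imports/job-42")?;
    let spec_key = prefix.append("/spec.json");
    assert_eq!(spec_key.bucket, "my-bucket");
    assert_eq!(spec_key.key, "imports/job-42/spec.json");
    assert_eq!(spec_key.to_string(), "s3://my-bucket/imports/job-42/spec.json");
    Ok(())
}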
@@ -1,27 +0,0 @@
use anyhow::Context;
use camino::Utf8Path;

use super::s3_uri::S3Uri;

pub(crate) async fn sync(local: &Utf8Path, remote: &S3Uri) -> anyhow::Result<()> {
    let mut builder = tokio::process::Command::new("s5cmd");
    // s5cmd uses aws-sdk-go v1, hence doesn't support AWS_ENDPOINT_URL
    if let Some(val) = std::env::var_os("AWS_ENDPOINT_URL") {
        builder.arg("--endpoint-url").arg(val);
    }
    builder
        .arg("sync")
        .arg(local.as_str())
        .arg(remote.to_string());
    let st = builder
        .spawn()
        .context("spawn s5cmd")?
        .wait()
        .await
        .context("wait for s5cmd")?;
    if st.success() {
        Ok(())
    } else {
        Err(anyhow::anyhow!("s5cmd failed"))
    }
}
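// Illustrative usage sketch (not part of the diff above): uploading a local
// directory to an S3 prefix with the sync() helper shown here. The local path
// and S3 URI are made up for the example.
async fn upload_pgdata_example() -> anyhow::Result<()> {
    use std::str::FromStr;
    let pgdata = camino::Utf8Path::new("/tmp/fast-import/pgdata");
    let dest = S3Uri::from_str("s3://my-bucket/imports/job-42")?.append("/");
    // Shells out to the `s5cmd` binary, so it must be available on PATH.
    sync(pgdata, &dest).await
}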
@@ -1,37 +1,38 @@
|
|||||||
|
use compute_api::{
|
||||||
|
responses::CatalogObjects,
|
||||||
|
spec::{Database, Role},
|
||||||
|
};
|
||||||
use futures::Stream;
|
use futures::Stream;
|
||||||
use postgres::NoTls;
|
use postgres::{Client, NoTls};
|
||||||
use std::{path::Path, process::Stdio, result::Result, sync::Arc};
|
use std::{path::Path, process::Stdio, result::Result, sync::Arc};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
io::{AsyncBufReadExt, BufReader},
|
io::{AsyncBufReadExt, BufReader},
|
||||||
process::Command,
|
process::Command,
|
||||||
spawn,
|
task,
|
||||||
};
|
};
|
||||||
use tokio_stream::{self as stream, StreamExt};
|
use tokio_stream::{self as stream, StreamExt};
|
||||||
use tokio_util::codec::{BytesCodec, FramedRead};
|
use tokio_util::codec::{BytesCodec, FramedRead};
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
use crate::compute::ComputeNode;
|
use crate::{
|
||||||
use crate::pg_helpers::{get_existing_dbs_async, get_existing_roles_async, postgres_conf_for_db};
|
compute::ComputeNode,
|
||||||
use compute_api::responses::CatalogObjects;
|
pg_helpers::{get_existing_dbs, get_existing_roles},
|
||||||
|
};
|
||||||
|
|
||||||
pub async fn get_dbs_and_roles(compute: &Arc<ComputeNode>) -> anyhow::Result<CatalogObjects> {
|
pub async fn get_dbs_and_roles(compute: &Arc<ComputeNode>) -> anyhow::Result<CatalogObjects> {
|
||||||
let conf = compute.get_tokio_conn_conf(Some("compute_ctl:get_dbs_and_roles"));
|
let connstr = compute.connstr.clone();
|
||||||
let (client, connection): (tokio_postgres::Client, _) = conf.connect(NoTls).await?;
|
task::spawn_blocking(move || {
|
||||||
|
let mut client = Client::connect(connstr.as_str(), NoTls)?;
|
||||||
spawn(async move {
|
let roles: Vec<Role>;
|
||||||
if let Err(e) = connection.await {
|
{
|
||||||
eprintln!("connection error: {}", e);
|
let mut xact = client.transaction()?;
|
||||||
|
roles = get_existing_roles(&mut xact)?;
|
||||||
}
|
}
|
||||||
});
|
let databases: Vec<Database> = get_existing_dbs(&mut client)?.values().cloned().collect();
|
||||||
|
|
||||||
let roles = get_existing_roles_async(&client).await?;
|
Ok(CatalogObjects { roles, databases })
|
||||||
|
})
|
||||||
let databases = get_existing_dbs_async(&client)
|
.await?
|
||||||
.await?
|
|
||||||
.into_values()
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
Ok(CatalogObjects { roles, databases })
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, thiserror::Error)]
|
#[derive(Debug, thiserror::Error)]
|
||||||
@@ -40,8 +41,6 @@ pub enum SchemaDumpError {
|
|||||||
DatabaseDoesNotExist,
|
DatabaseDoesNotExist,
|
||||||
#[error("Failed to execute pg_dump.")]
|
#[error("Failed to execute pg_dump.")]
|
||||||
IO(#[from] std::io::Error),
|
IO(#[from] std::io::Error),
|
||||||
#[error("Unexpected error.")]
|
|
||||||
Unexpected,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// It uses the pg_dump utility to dump the schema of the specified database.
|
// It uses the pg_dump utility to dump the schema of the specified database.
|
||||||
@@ -59,38 +58,11 @@ pub async fn get_database_schema(
|
|||||||
let pgbin = &compute.pgbin;
|
let pgbin = &compute.pgbin;
|
||||||
let basepath = Path::new(pgbin).parent().unwrap();
|
let basepath = Path::new(pgbin).parent().unwrap();
|
||||||
let pgdump = basepath.join("pg_dump");
|
let pgdump = basepath.join("pg_dump");
|
||||||
|
let mut connstr = compute.connstr.clone();
|
||||||
// Replace the DB in the connection string and disable it to parts.
|
connstr.set_path(dbname);
|
||||||
// This is the only option to handle DBs with special characters.
|
|
||||||
let conf =
|
|
||||||
postgres_conf_for_db(&compute.connstr, dbname).map_err(|_| SchemaDumpError::Unexpected)?;
|
|
||||||
let host = conf
|
|
||||||
.get_hosts()
|
|
||||||
.first()
|
|
||||||
.ok_or(SchemaDumpError::Unexpected)?;
|
|
||||||
let host = match host {
|
|
||||||
tokio_postgres::config::Host::Tcp(ip) => ip.to_string(),
|
|
||||||
#[cfg(unix)]
|
|
||||||
tokio_postgres::config::Host::Unix(path) => path.to_string_lossy().to_string(),
|
|
||||||
};
|
|
||||||
let port = conf
|
|
||||||
.get_ports()
|
|
||||||
.first()
|
|
||||||
.ok_or(SchemaDumpError::Unexpected)?;
|
|
||||||
let user = conf.get_user().ok_or(SchemaDumpError::Unexpected)?;
|
|
||||||
let dbname = conf.get_dbname().ok_or(SchemaDumpError::Unexpected)?;
|
|
||||||
|
|
||||||
let mut cmd = Command::new(pgdump)
|
let mut cmd = Command::new(pgdump)
|
||||||
// XXX: this seems to be the only option to deal with DBs with `=` in the name
|
|
||||||
// See <https://www.postgresql.org/message-id/flat/20151023003445.931.91267%40wrigleys.postgresql.org>
|
|
||||||
.env("PGDATABASE", dbname)
|
|
||||||
.arg("--host")
|
|
||||||
.arg(host)
|
|
||||||
.arg("--port")
|
|
||||||
.arg(port.to_string())
|
|
||||||
.arg("--username")
|
|
||||||
.arg(user)
|
|
||||||
.arg("--schema-only")
|
.arg("--schema-only")
|
||||||
|
.arg(connstr.as_str())
|
||||||
.stdout(Stdio::piped())
|
.stdout(Stdio::piped())
|
||||||
.stderr(Stdio::piped())
|
.stderr(Stdio::piped())
|
||||||
.kill_on_drop(true)
|
.kill_on_drop(true)
|
||||||
|
|||||||
@@ -1,16 +1,43 @@
|
|||||||
use anyhow::{anyhow, Ok, Result};
|
use anyhow::{anyhow, Ok, Result};
|
||||||
|
use postgres::Client;
|
||||||
use tokio_postgres::NoTls;
|
use tokio_postgres::NoTls;
|
||||||
use tracing::{error, instrument, warn};
|
use tracing::{error, instrument, warn};
|
||||||
|
|
||||||
use crate::compute::ComputeNode;
|
use crate::compute::ComputeNode;
|
||||||
|
|
||||||
|
/// Create a special service table for availability checks
|
||||||
|
/// only if it does not exist already.
|
||||||
|
pub fn create_availability_check_data(client: &mut Client) -> Result<()> {
|
||||||
|
let query = "
|
||||||
|
DO $$
|
||||||
|
BEGIN
|
||||||
|
IF NOT EXISTS(
|
||||||
|
SELECT 1
|
||||||
|
FROM pg_catalog.pg_tables
|
||||||
|
WHERE tablename = 'health_check'
|
||||||
|
)
|
||||||
|
THEN
|
||||||
|
CREATE TABLE health_check (
|
||||||
|
id serial primary key,
|
||||||
|
updated_at timestamptz default now()
|
||||||
|
);
|
||||||
|
INSERT INTO health_check VALUES (1, now())
|
||||||
|
ON CONFLICT (id) DO UPDATE
|
||||||
|
SET updated_at = now();
|
||||||
|
END IF;
|
||||||
|
END
|
||||||
|
$$;";
|
||||||
|
client.execute(query, &[])?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// Update timestamp in a row in a special service table to check
|
/// Update timestamp in a row in a special service table to check
|
||||||
/// that we can actually write some data in this particular timeline.
|
/// that we can actually write some data in this particular timeline.
|
||||||
#[instrument(skip_all)]
|
#[instrument(skip_all)]
|
||||||
pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
|
pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
|
||||||
// Connect to the database.
|
// Connect to the database.
|
||||||
let conf = compute.get_tokio_conn_conf(Some("compute_ctl:availability_checker"));
|
let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
|
||||||
let (client, connection) = conf.connect(NoTls).await?;
|
|
||||||
if client.is_closed() {
|
if client.is_closed() {
|
||||||
return Err(anyhow!("connection to postgres closed"));
|
return Err(anyhow!("connection to postgres closed"));
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,28 +1,26 @@
|
|||||||
use std::collections::{HashMap, HashSet};
|
use std::collections::HashMap;
|
||||||
use std::env;
|
use std::env;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::iter::once;
|
|
||||||
use std::os::unix::fs::{symlink, PermissionsExt};
|
use std::os::unix::fs::{symlink, PermissionsExt};
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::process::{Command, Stdio};
|
use std::process::{Command, Stdio};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use std::sync::atomic::AtomicU32;
|
use std::sync::atomic::AtomicU32;
|
||||||
use std::sync::atomic::Ordering;
|
use std::sync::atomic::Ordering;
|
||||||
use std::sync::{Arc, Condvar, Mutex, RwLock};
|
use std::sync::{Condvar, Mutex, RwLock};
|
||||||
use std::thread;
|
use std::thread;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
use chrono::{DateTime, Utc};
|
use chrono::{DateTime, Utc};
|
||||||
use compute_api::spec::{PgIdent, Role};
|
use compute_api::spec::PgIdent;
|
||||||
use futures::future::join_all;
|
use futures::future::join_all;
|
||||||
use futures::stream::FuturesUnordered;
|
use futures::stream::FuturesUnordered;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use nix::unistd::Pid;
|
use nix::unistd::Pid;
|
||||||
use postgres;
|
|
||||||
use postgres::error::SqlState;
|
use postgres::error::SqlState;
|
||||||
use postgres::NoTls;
|
use postgres::{Client, NoTls};
|
||||||
use tracing::{debug, error, info, instrument, warn};
|
use tracing::{debug, error, info, instrument, warn};
|
||||||
use utils::id::{TenantId, TimelineId};
|
use utils::id::{TenantId, TimelineId};
|
||||||
use utils::lsn::Lsn;
|
use utils::lsn::Lsn;
|
||||||
@@ -33,22 +31,15 @@ use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec, ExtVersion};
|
|||||||
use utils::measured_stream::MeasuredReader;
|
use utils::measured_stream::MeasuredReader;
|
||||||
|
|
||||||
use nix::sys::signal::{kill, Signal};
|
use nix::sys::signal::{kill, Signal};
|
||||||
use remote_storage::{DownloadError, RemotePath};
|
|
||||||
use tokio::spawn;
|
|
||||||
|
|
||||||
use crate::installed_extensions::get_installed_extensions;
|
use remote_storage::{DownloadError, RemotePath};
|
||||||
|
|
||||||
|
use crate::checker::create_availability_check_data;
|
||||||
|
use crate::installed_extensions::get_installed_extensions_sync;
|
||||||
use crate::local_proxy;
|
use crate::local_proxy;
|
||||||
|
use crate::logger::inlinify;
|
||||||
use crate::pg_helpers::*;
|
use crate::pg_helpers::*;
|
||||||
use crate::spec::*;
|
use crate::spec::*;
|
||||||
use crate::spec_apply::ApplySpecPhase::{
|
|
||||||
CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateSuperUser,
|
|
||||||
DropInvalidDatabases, DropRoles, HandleNeonExtension, HandleOtherExtensions,
|
|
||||||
RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
|
|
||||||
};
|
|
||||||
use crate::spec_apply::PerDatabasePhase::{
|
|
||||||
ChangeSchemaPerms, DeleteDBRoleReferences, HandleAnonExtension,
|
|
||||||
};
|
|
||||||
use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
|
|
||||||
use crate::sync_sk::{check_if_synced, ping_safekeeper};
|
use crate::sync_sk::{check_if_synced, ping_safekeeper};
|
||||||
use crate::{config, extension_server};
|
use crate::{config, extension_server};
|
||||||
|
|
||||||
@@ -59,10 +50,6 @@ pub static PG_PID: AtomicU32 = AtomicU32::new(0);
|
|||||||
pub struct ComputeNode {
|
pub struct ComputeNode {
|
||||||
// Url type maintains proper escaping
|
// Url type maintains proper escaping
|
||||||
pub connstr: url::Url,
|
pub connstr: url::Url,
|
||||||
// We connect to Postgres from many different places, so build configs once
|
|
||||||
// and reuse them where needed.
|
|
||||||
pub conn_conf: postgres::config::Config,
|
|
||||||
pub tokio_conn_conf: tokio_postgres::config::Config,
|
|
||||||
pub pgdata: String,
|
pub pgdata: String,
|
||||||
pub pgbin: String,
|
pub pgbin: String,
|
||||||
pub pgversion: String,
|
pub pgversion: String,
|
||||||
@@ -79,8 +66,6 @@ pub struct ComputeNode {
|
|||||||
/// - we push spec and it does configuration
|
/// - we push spec and it does configuration
|
||||||
/// - but then it is restarted without any spec again
|
/// - but then it is restarted without any spec again
|
||||||
pub live_config_allowed: bool,
|
pub live_config_allowed: bool,
|
||||||
/// The port that the compute's HTTP server listens on
|
|
||||||
pub http_port: u16,
|
|
||||||
/// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
|
/// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
|
||||||
/// To allow HTTP API server to serving status requests, while configuration
|
/// To allow HTTP API server to serving status requests, while configuration
|
||||||
/// is in progress, lock should be held only for short periods of time to do
|
/// is in progress, lock should be held only for short periods of time to do
|
||||||
@@ -239,7 +224,10 @@ fn maybe_cgexec(cmd: &str) -> Command {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn construct_superuser_query(spec: &ComputeSpec) -> String {
|
/// Create special neon_superuser role, that's a slightly nerfed version of a real superuser
|
||||||
|
/// that we give to customers
|
||||||
|
#[instrument(skip_all)]
|
||||||
|
fn create_neon_superuser(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
||||||
let roles = spec
|
let roles = spec
|
||||||
.cluster
|
.cluster
|
||||||
.roles
|
.roles
|
||||||
@@ -308,8 +296,11 @@ pub(crate) fn construct_superuser_query(spec: &ComputeSpec) -> String {
|
|||||||
$$;"#,
|
$$;"#,
|
||||||
roles_decl, database_decl,
|
roles_decl, database_decl,
|
||||||
);
|
);
|
||||||
|
info!("Neon superuser created: {}", inlinify(&query));
|
||||||
query
|
client
|
||||||
|
.simple_query(&query)
|
||||||
|
.map_err(|e| anyhow::anyhow!(e).context(query))?;
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ComputeNode {
|
impl ComputeNode {
|
||||||
@@ -613,7 +604,11 @@ impl ComputeNode {
|
|||||||
/// Do all the preparations like PGDATA directory creation, configuration,
|
/// Do all the preparations like PGDATA directory creation, configuration,
|
||||||
/// safekeepers sync, basebackup, etc.
|
/// safekeepers sync, basebackup, etc.
|
||||||
#[instrument(skip_all)]
|
#[instrument(skip_all)]
|
||||||
pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
|
pub fn prepare_pgdata(
|
||||||
|
&self,
|
||||||
|
compute_state: &ComputeState,
|
||||||
|
extension_server_port: u16,
|
||||||
|
) -> Result<()> {
|
||||||
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
|
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
|
||||||
let spec = &pspec.spec;
|
let spec = &pspec.spec;
|
||||||
let pgdata_path = Path::new(&self.pgdata);
|
let pgdata_path = Path::new(&self.pgdata);
|
||||||
@@ -623,7 +618,7 @@ impl ComputeNode {
|
|||||||
config::write_postgres_conf(
|
config::write_postgres_conf(
|
||||||
&pgdata_path.join("postgresql.conf"),
|
&pgdata_path.join("postgresql.conf"),
|
||||||
&pspec.spec,
|
&pspec.spec,
|
||||||
self.http_port,
|
Some(extension_server_port),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Syncing safekeepers is only safe with primary nodes: if a primary
|
// Syncing safekeepers is only safe with primary nodes: if a primary
|
||||||
@@ -803,10 +798,10 @@ impl ComputeNode {
|
|||||||
/// version. In the future, it may upgrade all 3rd-party extensions.
|
/// version. In the future, it may upgrade all 3rd-party extensions.
|
||||||
#[instrument(skip_all)]
|
#[instrument(skip_all)]
|
||||||
pub fn post_apply_config(&self) -> Result<()> {
|
pub fn post_apply_config(&self) -> Result<()> {
|
||||||
let conf = self.get_conn_conf(Some("compute_ctl:post_apply_config"));
|
let connstr = self.connstr.clone();
|
||||||
thread::spawn(move || {
|
thread::spawn(move || {
|
||||||
let func = || {
|
let func = || {
|
||||||
let mut client = conf.connect(NoTls)?;
|
let mut client = Client::connect(connstr.as_str(), NoTls)?;
|
||||||
handle_neon_extension_upgrade(&mut client)
|
handle_neon_extension_upgrade(&mut client)
|
||||||
.context("handle_neon_extension_upgrade")?;
|
.context("handle_neon_extension_upgrade")?;
|
||||||
Ok::<_, anyhow::Error>(())
|
Ok::<_, anyhow::Error>(())
|
||||||
@@ -818,51 +813,40 @@ impl ComputeNode {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
|
/// Do initial configuration of the already started Postgres.
|
||||||
let mut conf = self.conn_conf.clone();
|
#[instrument(skip_all)]
|
||||||
if let Some(application_name) = application_name {
|
pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
|
||||||
conf.application_name(application_name);
|
// If connection fails,
|
||||||
}
|
// it may be the old node with `zenith_admin` superuser.
|
||||||
conf
|
//
|
||||||
}
|
// In this case we need to connect with old `zenith_admin` name
|
||||||
|
// and create new user. We cannot simply rename connected user,
|
||||||
|
// but we can create a new one and grant it all privileges.
|
||||||
|
let mut connstr = self.connstr.clone();
|
||||||
|
connstr
|
||||||
|
.query_pairs_mut()
|
||||||
|
.append_pair("application_name", "apply_config");
|
||||||
|
|
||||||
pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
|
let mut client = match Client::connect(connstr.as_str(), NoTls) {
|
||||||
let mut conf = self.tokio_conn_conf.clone();
|
|
||||||
if let Some(application_name) = application_name {
|
|
||||||
conf.application_name(application_name);
|
|
||||||
}
|
|
||||||
conf
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_maintenance_client(
|
|
||||||
conf: &tokio_postgres::Config,
|
|
||||||
) -> Result<tokio_postgres::Client> {
|
|
||||||
let mut conf = conf.clone();
|
|
||||||
conf.application_name("compute_ctl:apply_config");
|
|
||||||
|
|
||||||
let (client, conn) = match conf.connect(NoTls).await {
|
|
||||||
// If connection fails, it may be the old node with `zenith_admin` superuser.
|
|
||||||
//
|
|
||||||
// In this case we need to connect with old `zenith_admin` name
|
|
||||||
// and create new user. We cannot simply rename connected user,
|
|
||||||
// but we can create a new one and grant it all privileges.
|
|
||||||
Err(e) => match e.code() {
|
Err(e) => match e.code() {
|
||||||
Some(&SqlState::INVALID_PASSWORD)
|
Some(&SqlState::INVALID_PASSWORD)
|
||||||
| Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
|
| Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
|
||||||
// Connect with zenith_admin if cloud_admin could not authenticate
|
// connect with zenith_admin if cloud_admin could not authenticate
|
||||||
info!(
|
info!(
|
||||||
"cannot connect to postgres: {}, retrying with `zenith_admin` username",
|
"cannot connect to postgres: {}, retrying with `zenith_admin` username",
|
||||||
e
|
e
|
||||||
);
|
);
|
||||||
let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
|
let mut zenith_admin_connstr = connstr.clone();
|
||||||
zenith_admin_conf.application_name("compute_ctl:apply_config");
|
|
||||||
zenith_admin_conf.user("zenith_admin");
|
zenith_admin_connstr
|
||||||
|
.set_username("zenith_admin")
|
||||||
|
.map_err(|_| anyhow::anyhow!("invalid connstr"))?;
|
||||||
|
|
||||||
let mut client =
|
let mut client =
|
||||||
zenith_admin_conf.connect(NoTls)
|
Client::connect(zenith_admin_connstr.as_str(), NoTls)
|
||||||
.context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
|
.context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
|
||||||
|
|
||||||
// Disable forwarding so that users don't get a cloud_admin role
|
// Disable forwarding so that users don't get a cloud_admin role
|
||||||
|
|
||||||
let mut func = || {
|
let mut func = || {
|
||||||
client.simple_query("SET neon.forward_ddl = false")?;
|
client.simple_query("SET neon.forward_ddl = false")?;
|
||||||
client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
|
client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
|
||||||
@@ -873,319 +857,58 @@ impl ComputeNode {
|
|||||||
|
|
||||||
drop(client);
|
drop(client);
|
||||||
|
|
||||||
// Reconnect with connstring with expected name
|
// reconnect with connstring with expected name
|
||||||
conf.connect(NoTls).await?
|
Client::connect(connstr.as_str(), NoTls)?
|
||||||
}
|
}
|
||||||
_ => return Err(e.into()),
|
_ => return Err(e.into()),
|
||||||
},
|
},
|
||||||
Ok((client, conn)) => (client, conn),
|
Ok(client) => client,
|
||||||
};
|
};
|
||||||
|
|
||||||
spawn(async move {
|
// Disable DDL forwarding because control plane already knows about these roles/databases.
|
||||||
if let Err(e) = conn.await {
|
|
||||||
error!("maintenance client connection error: {}", e);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// Disable DDL forwarding because control plane already knows about the roles/databases
|
|
||||||
// we're about to modify.
|
|
||||||
client
|
client
|
||||||
.simple_query("SET neon.forward_ddl = false")
|
.simple_query("SET neon.forward_ddl = false")
|
||||||
.await
|
|
||||||
.context("apply_config SET neon.forward_ddl = false")?;
|
.context("apply_config SET neon.forward_ddl = false")?;
|
||||||
|
|
||||||
Ok(client)
|
// Proceed with post-startup configuration. Note, that order of operations is important.
|
||||||
}
|
let spec = &compute_state.pspec.as_ref().expect("spec must be set").spec;
|
||||||
|
create_neon_superuser(spec, &mut client).context("apply_config create_neon_superuser")?;
|
||||||
|
cleanup_instance(&mut client).context("apply_config cleanup_instance")?;
|
||||||
|
handle_roles(spec, &mut client).context("apply_config handle_roles")?;
|
||||||
|
handle_databases(spec, &mut client).context("apply_config handle_databases")?;
|
||||||
|
handle_role_deletions(spec, connstr.as_str(), &mut client)
|
||||||
|
.context("apply_config handle_role_deletions")?;
|
||||||
|
handle_grants(
|
||||||
|
spec,
|
||||||
|
&mut client,
|
||||||
|
connstr.as_str(),
|
||||||
|
self.has_feature(ComputeFeature::AnonExtension),
|
||||||
|
)
|
||||||
|
.context("apply_config handle_grants")?;
|
||||||
|
handle_extensions(spec, &mut client).context("apply_config handle_extensions")?;
|
||||||
|
handle_extension_neon(&mut client).context("apply_config handle_extension_neon")?;
|
||||||
|
create_availability_check_data(&mut client)
|
||||||
|
.context("apply_config create_availability_check_data")?;
|
||||||
|
|
||||||
/// Apply the spec to the running PostgreSQL instance.
|
// 'Close' connection
|
||||||
/// The caller can decide to run with multiple clients in parallel, or
|
drop(client);
|
||||||
/// single mode. Either way, the commands executed will be the same, and
|
|
||||||
/// only commands run in different databases are parallelized.
|
|
||||||
#[instrument(skip_all)]
|
|
||||||
pub fn apply_spec_sql(
|
|
||||||
&self,
|
|
||||||
spec: Arc<ComputeSpec>,
|
|
||||||
conf: Arc<tokio_postgres::Config>,
|
|
||||||
concurrency: usize,
|
|
||||||
) -> Result<()> {
|
|
||||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
|
||||||
.enable_all()
|
|
||||||
.build()?;
|
|
||||||
|
|
||||||
info!("Applying config with max {} concurrency", concurrency);
|
if let Some(ref local_proxy) = spec.local_proxy_config {
|
||||||
debug!("Config: {:?}", spec);
|
|
||||||
|
|
||||||
rt.block_on(async {
|
|
||||||
// Proceed with post-startup configuration. Note, that order of operations is important.
|
|
||||||
let client = Self::get_maintenance_client(&conf).await?;
|
|
||||||
let spec = spec.clone();
|
|
||||||
|
|
||||||
let databases = get_existing_dbs_async(&client).await?;
|
|
||||||
let roles = get_existing_roles_async(&client)
|
|
||||||
.await?
|
|
||||||
.into_iter()
|
|
||||||
.map(|role| (role.name.clone(), role))
|
|
||||||
.collect::<HashMap<String, Role>>();
|
|
||||||
|
|
||||||
let jwks_roles = Arc::new(
|
|
||||||
spec.as_ref()
|
|
||||||
.local_proxy_config
|
|
||||||
.iter()
|
|
||||||
.flat_map(|it| &it.jwks)
|
|
||||||
.flatten()
|
|
||||||
.flat_map(|setting| &setting.role_names)
|
|
||||||
.cloned()
|
|
||||||
.collect::<HashSet<_>>(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let ctx = Arc::new(tokio::sync::RwLock::new(MutableApplyContext {
|
|
||||||
roles,
|
|
||||||
dbs: databases,
|
|
||||||
}));
|
|
||||||
|
|
||||||
for phase in [
|
|
||||||
CreateSuperUser,
|
|
||||||
DropInvalidDatabases,
|
|
||||||
RenameRoles,
|
|
||||||
CreateAndAlterRoles,
|
|
||||||
RenameAndDeleteDatabases,
|
|
||||||
CreateAndAlterDatabases,
|
|
||||||
] {
|
|
||||||
info!("Applying phase {:?}", &phase);
|
|
||||||
apply_operations(
|
|
||||||
spec.clone(),
|
|
||||||
ctx.clone(),
|
|
||||||
jwks_roles.clone(),
|
|
||||||
phase,
|
|
||||||
|| async { Ok(&client) },
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Applying RunInEachDatabase phase");
|
|
||||||
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
|
|
||||||
|
|
||||||
let db_processes = spec
|
|
||||||
.cluster
|
|
||||||
.databases
|
|
||||||
.iter()
|
|
||||||
.map(|db| DB::new(db.clone()))
|
|
||||||
// include
|
|
||||||
.chain(once(DB::SystemDB))
|
|
||||||
.map(|db| {
|
|
||||||
let spec = spec.clone();
|
|
||||||
let ctx = ctx.clone();
|
|
||||||
let jwks_roles = jwks_roles.clone();
|
|
||||||
let mut conf = conf.as_ref().clone();
|
|
||||||
let concurrency_token = concurrency_token.clone();
|
|
||||||
let db = db.clone();
|
|
||||||
|
|
||||||
debug!("Applying per-database phases for Database {:?}", &db);
|
|
||||||
|
|
||||||
match &db {
|
|
||||||
DB::SystemDB => {}
|
|
||||||
DB::UserDB(db) => {
|
|
||||||
conf.dbname(db.name.as_str());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let conf = Arc::new(conf);
|
|
||||||
let fut = Self::apply_spec_sql_db(
|
|
||||||
spec.clone(),
|
|
||||||
conf,
|
|
||||||
ctx.clone(),
|
|
||||||
jwks_roles.clone(),
|
|
||||||
concurrency_token.clone(),
|
|
||||||
db,
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(spawn(fut))
|
|
||||||
})
|
|
||||||
.collect::<Vec<Result<_, anyhow::Error>>>();
|
|
||||||
|
|
||||||
for process in db_processes.into_iter() {
|
|
||||||
let handle = process?;
|
|
||||||
handle.await??;
|
|
||||||
}
|
|
||||||
|
|
||||||
for phase in vec![
|
|
||||||
HandleOtherExtensions,
|
|
||||||
HandleNeonExtension,
|
|
||||||
CreateAvailabilityCheck,
|
|
||||||
DropRoles,
|
|
||||||
] {
|
|
||||||
debug!("Applying phase {:?}", &phase);
|
|
||||||
apply_operations(
|
|
||||||
spec.clone(),
|
|
||||||
ctx.clone(),
|
|
||||||
jwks_roles.clone(),
|
|
||||||
phase,
|
|
||||||
|| async { Ok(&client) },
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok::<(), anyhow::Error>(())
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Apply SQL migrations of the RunInEachDatabase phase.
|
|
||||||
///
|
|
||||||
/// May opt to not connect to databases that don't have any scheduled
|
|
||||||
/// operations. The function is concurrency-controlled with the provided
|
|
||||||
/// semaphore. The caller has to make sure the semaphore isn't exhausted.
|
|
||||||
async fn apply_spec_sql_db(
|
|
||||||
spec: Arc<ComputeSpec>,
|
|
||||||
conf: Arc<tokio_postgres::Config>,
|
|
||||||
ctx: Arc<tokio::sync::RwLock<MutableApplyContext>>,
|
|
||||||
jwks_roles: Arc<HashSet<String>>,
|
|
||||||
concurrency_token: Arc<tokio::sync::Semaphore>,
|
|
||||||
db: DB,
|
|
||||||
) -> Result<()> {
|
|
||||||
let _permit = concurrency_token.acquire().await?;
|
|
||||||
|
|
||||||
let mut client_conn = None;
|
|
||||||
|
|
||||||
for subphase in [
|
|
||||||
DeleteDBRoleReferences,
|
|
||||||
ChangeSchemaPerms,
|
|
||||||
HandleAnonExtension,
|
|
||||||
] {
|
|
||||||
apply_operations(
|
|
||||||
spec.clone(),
|
|
||||||
ctx.clone(),
|
|
||||||
jwks_roles.clone(),
|
|
||||||
RunInEachDatabase {
|
|
||||||
db: db.clone(),
|
|
||||||
subphase,
|
|
||||||
},
|
|
||||||
// Only connect if apply_operation actually wants a connection.
|
|
||||||
// It's quite possible this database doesn't need any queries,
|
|
||||||
// so by not connecting we save time and effort connecting to
|
|
||||||
// that database.
|
|
||||||
|| async {
|
|
||||||
if client_conn.is_none() {
|
|
||||||
let db_client = Self::get_maintenance_client(&conf).await?;
|
|
||||||
client_conn.replace(db_client);
|
|
||||||
}
|
|
||||||
let client = client_conn.as_ref().unwrap();
|
|
||||||
Ok(client)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
drop(client_conn);
|
|
||||||
|
|
||||||
Ok::<(), anyhow::Error>(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Choose how many concurrent connections to use for applying the spec changes.
|
|
||||||
pub fn max_service_connections(
|
|
||||||
&self,
|
|
||||||
compute_state: &ComputeState,
|
|
||||||
spec: &ComputeSpec,
|
|
||||||
) -> usize {
|
|
||||||
// If the cluster is in Init state we don't have to deal with user connections,
|
|
||||||
// and can thus use all `max_connections` connection slots. However, that's generally not
|
|
||||||
// very efficient, so we generally still limit it to a smaller number.
|
|
||||||
if compute_state.status == ComputeStatus::Init {
|
|
||||||
// If the settings contain 'max_connections', use that as template
|
|
||||||
if let Some(config) = spec.cluster.settings.find("max_connections") {
|
|
||||||
config.parse::<usize>().ok()
|
|
||||||
} else {
|
|
||||||
// Otherwise, try to find the setting in the postgresql_conf string
|
|
||||||
spec.cluster
|
|
||||||
.postgresql_conf
|
|
||||||
.iter()
|
|
||||||
.flat_map(|conf| conf.split("\n"))
|
|
||||||
.filter_map(|line| {
|
|
||||||
if !line.contains("max_connections") {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let (key, value) = line.split_once("=")?;
|
|
||||||
let key = key
|
|
||||||
.trim_start_matches(char::is_whitespace)
|
|
||||||
.trim_end_matches(char::is_whitespace);
|
|
||||||
|
|
||||||
let value = value
|
|
||||||
.trim_start_matches(char::is_whitespace)
|
|
||||||
.trim_end_matches(char::is_whitespace);
|
|
||||||
|
|
||||||
if key != "max_connections" {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
value.parse::<usize>().ok()
|
|
||||||
})
|
|
||||||
.next()
|
|
||||||
}
|
|
||||||
// If max_connections is present, use at most 1/3rd of that.
|
|
||||||
// When max_connections is lower than 30, try to use at least 10 connections, but
|
|
||||||
// never more than max_connections.
|
|
||||||
.map(|limit| match limit {
|
|
||||||
0..10 => limit,
|
|
||||||
10..30 => 10,
|
|
||||||
30.. => limit / 3,
|
|
||||||
})
|
|
||||||
// If we didn't find max_connections, default to 10 concurrent connections.
|
|
||||||
.unwrap_or(10)
|
|
||||||
} else {
|
|
||||||
// state == Running
|
|
||||||
// Because the cluster is already in the Running state, we should assume users are
|
|
||||||
// already connected to the cluster, and high concurrency could negatively
|
|
||||||
// impact user connectivity. Therefore, we can limit concurrency to the number of
|
|
||||||
// reserved superuser connections, which users wouldn't be able to use anyway.
|
|
||||||
spec.cluster
|
|
||||||
.settings
|
|
||||||
.find("superuser_reserved_connections")
|
|
||||||
.iter()
|
|
||||||
.filter_map(|val| val.parse::<usize>().ok())
|
|
||||||
.map(|val| if val > 1 { val - 1 } else { 1 })
|
|
||||||
.last()
|
|
||||||
.unwrap_or(3)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Do initial configuration of the already started Postgres.
|
|
||||||
#[instrument(skip_all)]
|
|
||||||
pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
|
|
||||||
let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
|
|
||||||
|
|
||||||
let conf = Arc::new(conf);
|
|
||||||
let spec = Arc::new(
|
|
||||||
compute_state
|
|
||||||
.pspec
|
|
||||||
.as_ref()
|
|
||||||
.expect("spec must be set")
|
|
||||||
.spec
|
|
||||||
.clone(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
|
|
||||||
|
|
||||||
// Merge-apply spec & changes to PostgreSQL state.
|
|
||||||
self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
|
|
||||||
|
|
||||||
if let Some(ref local_proxy) = &spec.clone().local_proxy_config {
|
|
||||||
info!("configuring local_proxy");
|
info!("configuring local_proxy");
|
||||||
local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
|
local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run migrations separately to not hold up cold starts
|
// Run migrations separately to not hold up cold starts
|
||||||
thread::spawn(move || {
|
thread::spawn(move || {
|
||||||
let conf = conf.as_ref().clone();
|
let mut connstr = connstr.clone();
|
||||||
let mut conf = postgres::config::Config::from(conf);
|
connstr
|
||||||
conf.application_name("compute_ctl:migrations");
|
.query_pairs_mut()
|
||||||
|
.append_pair("application_name", "migrations");
|
||||||
|
|
||||||
let mut client = conf.connect(NoTls)?;
|
let mut client = Client::connect(connstr.as_str(), NoTls)?;
|
||||||
handle_migrations(&mut client).context("apply_config handle_migrations")
|
handle_migrations(&mut client).context("apply_config handle_migrations")
|
||||||
});
|
});
|
||||||
|
Ok(())
|
||||||
Ok::<(), anyhow::Error>(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wrapped this around `pg_ctl reload`, but right now we don't use
|
// Wrapped this around `pg_ctl reload`, but right now we don't use
|
||||||
@@ -1241,26 +964,40 @@ impl ComputeNode {
|
|||||||
// Write new config
|
// Write new config
|
||||||
let pgdata_path = Path::new(&self.pgdata);
|
let pgdata_path = Path::new(&self.pgdata);
|
||||||
let postgresql_conf_path = pgdata_path.join("postgresql.conf");
|
let postgresql_conf_path = pgdata_path.join("postgresql.conf");
|
||||||
config::write_postgres_conf(&postgresql_conf_path, &spec, self.http_port)?;
|
config::write_postgres_conf(&postgresql_conf_path, &spec, None)?;
|
||||||
|
// temporarily reset max_cluster_size in config
|
||||||
let max_concurrent_connections = spec.reconfigure_concurrency;
|
|
||||||
|
|
||||||
// Temporarily reset max_cluster_size in config
|
|
||||||
// to avoid the possibility of hitting the limit, while we are reconfiguring:
|
// to avoid the possibility of hitting the limit, while we are reconfiguring:
|
||||||
// creating new extensions, roles, etc.
|
// creating new extensions, roles, etc...
|
||||||
config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
|
config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
|
||||||
self.pg_reload_conf()?;
|
self.pg_reload_conf()?;
|
||||||
|
|
||||||
|
let mut client = Client::connect(self.connstr.as_str(), NoTls)?;
|
||||||
|
|
||||||
|
// Proceed with post-startup configuration. Note, that order of operations is important.
|
||||||
|
// Disable DDL forwarding because control plane already knows about these roles/databases.
|
||||||
if spec.mode == ComputeMode::Primary {
|
if spec.mode == ComputeMode::Primary {
|
||||||
let mut conf = tokio_postgres::Config::from_str(self.connstr.as_str()).unwrap();
|
client.simple_query("SET neon.forward_ddl = false")?;
|
||||||
conf.application_name("apply_config");
|
cleanup_instance(&mut client)?;
|
||||||
let conf = Arc::new(conf);
|
handle_roles(&spec, &mut client)?;
|
||||||
|
handle_databases(&spec, &mut client)?;
|
||||||
let spec = Arc::new(spec.clone());
|
handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?;
|
||||||
|
handle_grants(
|
||||||
self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
|
&spec,
|
||||||
|
&mut client,
|
||||||
|
self.connstr.as_str(),
|
||||||
|
self.has_feature(ComputeFeature::AnonExtension),
|
||||||
|
)?;
|
||||||
|
handle_extensions(&spec, &mut client)?;
|
||||||
|
handle_extension_neon(&mut client)?;
|
||||||
|
// We can skip handle_migrations here because a new migration can only appear
|
||||||
|
// if we have a new version of the compute_ctl binary, which can only happen
|
||||||
|
// if compute got restarted, in which case we'll end up inside of apply_config
|
||||||
|
// instead of reconfigure.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 'Close' connection
|
||||||
|
drop(client);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
@@ -1277,7 +1014,10 @@ impl ComputeNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[instrument(skip_all)]
|
#[instrument(skip_all)]
|
||||||
pub fn start_compute(&self) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
|
pub fn start_compute(
|
||||||
|
&self,
|
||||||
|
extension_server_port: u16,
|
||||||
|
) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
|
||||||
let compute_state = self.state.lock().unwrap().clone();
|
let compute_state = self.state.lock().unwrap().clone();
|
||||||
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
|
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
|
||||||
info!(
|
info!(
|
||||||
@@ -1352,7 +1092,7 @@ impl ComputeNode {
|
|||||||
info!("{:?}", remote_ext_metrics);
|
info!("{:?}", remote_ext_metrics);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.prepare_pgdata(&compute_state)?;
|
self.prepare_pgdata(&compute_state, extension_server_port)?;
|
||||||
|
|
||||||
let start_time = Utc::now();
|
let start_time = Utc::now();
|
||||||
let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
|
let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
|
||||||
@@ -1379,19 +1119,9 @@ impl ComputeNode {
|
|||||||
}
|
}
|
||||||
self.post_apply_config()?;
|
self.post_apply_config()?;
|
||||||
|
|
||||||
let conf = self.get_conn_conf(None);
|
let connstr = self.connstr.clone();
|
||||||
thread::spawn(move || {
|
thread::spawn(move || {
|
||||||
let res = get_installed_extensions(conf);
|
get_installed_extensions_sync(connstr).context("get_installed_extensions")
|
||||||
match res {
|
|
||||||
Ok(extensions) => {
|
|
||||||
info!(
|
|
||||||
"[NEON_EXT_STAT] {}",
|
|
||||||
serde_json::to_string(&extensions)
|
|
||||||
.expect("failed to serialize extensions list")
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Err(err) => error!("could not get installed extensions: {err:?}"),
|
|
||||||
}
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1520,8 +1250,7 @@ impl ComputeNode {
|
|||||||
/// Select `pg_stat_statements` data and return it as a stringified JSON
|
/// Select `pg_stat_statements` data and return it as a stringified JSON
|
||||||
pub async fn collect_insights(&self) -> String {
|
pub async fn collect_insights(&self) -> String {
|
||||||
let mut result_rows: Vec<String> = Vec::new();
|
let mut result_rows: Vec<String> = Vec::new();
|
||||||
let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
|
let connect_result = tokio_postgres::connect(self.connstr.as_str(), NoTls).await;
|
||||||
let connect_result = conf.connect(NoTls).await;
|
|
||||||
let (client, connection) = connect_result.unwrap();
|
let (client, connection) = connect_result.unwrap();
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
if let Err(e) = connection.await {
|
if let Err(e) = connection.await {
|
||||||
@@ -1647,9 +1376,10 @@ LIMIT 100",
|
|||||||
privileges: &[Privilege],
|
privileges: &[Privilege],
|
||||||
role_name: &PgIdent,
|
role_name: &PgIdent,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
|
use tokio_postgres::config::Config;
|
||||||
use tokio_postgres::NoTls;
|
use tokio_postgres::NoTls;
|
||||||
|
|
||||||
let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
|
let mut conf = Config::from_str(self.connstr.as_str()).unwrap();
|
||||||
conf.dbname(db_name);
|
conf.dbname(db_name);
|
||||||
|
|
||||||
let (db_client, conn) = conf
|
let (db_client, conn) = conf
|
||||||
@@ -1686,9 +1416,10 @@ LIMIT 100",
|
|||||||
db_name: &PgIdent,
|
db_name: &PgIdent,
|
||||||
ext_version: ExtVersion,
|
ext_version: ExtVersion,
|
||||||
) -> Result<ExtVersion> {
|
) -> Result<ExtVersion> {
|
||||||
|
use tokio_postgres::config::Config;
|
||||||
use tokio_postgres::NoTls;
|
use tokio_postgres::NoTls;
|
||||||
|
|
||||||
let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
|
let mut conf = Config::from_str(self.connstr.as_str()).unwrap();
|
||||||
conf.dbname(db_name);
|
conf.dbname(db_name);
|
||||||
|
|
||||||
let (db_client, conn) = conf
|
let (db_client, conn) = conf
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ pub fn line_in_file(path: &Path, line: &str) -> Result<bool> {
|
|||||||
pub fn write_postgres_conf(
|
pub fn write_postgres_conf(
|
||||||
path: &Path,
|
path: &Path,
|
||||||
spec: &ComputeSpec,
|
spec: &ComputeSpec,
|
||||||
extension_server_port: u16,
|
extension_server_port: Option<u16>,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
// File::create() destroys the file content if it exists.
|
// File::create() destroys the file content if it exists.
|
||||||
let mut file = File::create(path)?;
|
let mut file = File::create(path)?;
|
||||||
@@ -116,7 +116,7 @@ pub fn write_postgres_conf(
|
|||||||
vartype: "enum".to_owned(),
|
vartype: "enum".to_owned(),
|
||||||
};
|
};
|
||||||
|
|
||||||
writeln!(file, "{}", opt.to_pg_setting())?;
|
write!(file, "{}", opt.to_pg_setting())?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -127,7 +127,9 @@ pub fn write_postgres_conf(
|
|||||||
writeln!(file, "# Managed by compute_ctl: end")?;
|
writeln!(file, "# Managed by compute_ctl: end")?;
|
||||||
}
|
}
|
||||||
|
|
||||||
writeln!(file, "neon.extension_server_port={}", extension_server_port)?;
|
if let Some(port) = extension_server_port {
|
||||||
|
writeln!(file, "neon.extension_server_port={}", port)?;
|
||||||
|
}
|
||||||
|
|
||||||
// This is essential to keep this line at the end of the file,
|
// This is essential to keep this line at the end of the file,
|
||||||
// because it is intended to override any settings above.
|
// because it is intended to override any settings above.
|
||||||
|
|||||||
@@ -103,33 +103,14 @@ fn get_pg_config(argument: &str, pgbin: &str) -> String {
|
|||||||
.to_string()
|
.to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_pg_version(pgbin: &str) -> PostgresMajorVersion {
|
pub fn get_pg_version(pgbin: &str) -> String {
|
||||||
// pg_config --version returns a (platform specific) human readable string
|
// pg_config --version returns a (platform specific) human readable string
|
||||||
// such as "PostgreSQL 15.4". We parse this to v14/v15/v16 etc.
|
// such as "PostgreSQL 15.4". We parse this to v14/v15/v16 etc.
|
||||||
let human_version = get_pg_config("--version", pgbin);
|
let human_version = get_pg_config("--version", pgbin);
|
||||||
parse_pg_version(&human_version)
|
parse_pg_version(&human_version).to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_pg_version_string(pgbin: &str) -> String {
|
fn parse_pg_version(human_version: &str) -> &str {
|
||||||
match get_pg_version(pgbin) {
|
|
||||||
PostgresMajorVersion::V14 => "v14",
|
|
||||||
PostgresMajorVersion::V15 => "v15",
|
|
||||||
PostgresMajorVersion::V16 => "v16",
|
|
||||||
PostgresMajorVersion::V17 => "v17",
|
|
||||||
}
|
|
||||||
.to_owned()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
|
||||||
pub enum PostgresMajorVersion {
|
|
||||||
V14,
|
|
||||||
V15,
|
|
||||||
V16,
|
|
||||||
V17,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_pg_version(human_version: &str) -> PostgresMajorVersion {
|
|
||||||
use PostgresMajorVersion::*;
|
|
||||||
// Normal releases have version strings like "PostgreSQL 15.4". But there
|
// Normal releases have version strings like "PostgreSQL 15.4". But there
|
||||||
// are also pre-release versions like "PostgreSQL 17devel" or "PostgreSQL
|
// are also pre-release versions like "PostgreSQL 17devel" or "PostgreSQL
|
||||||
// 16beta2" or "PostgreSQL 17rc1". And with the --with-extra-version
|
// 16beta2" or "PostgreSQL 17rc1". And with the --with-extra-version
|
||||||
@@ -140,10 +121,10 @@ fn parse_pg_version(human_version: &str) -> PostgresMajorVersion {
|
|||||||
.captures(human_version)
|
.captures(human_version)
|
||||||
{
|
{
|
||||||
Some(captures) if captures.len() == 2 => match &captures["major"] {
|
Some(captures) if captures.len() == 2 => match &captures["major"] {
|
||||||
"14" => return V14,
|
"14" => return "v14",
|
||||||
"15" => return V15,
|
"15" => return "v15",
|
||||||
"16" => return V16,
|
"16" => return "v16",
|
||||||
"17" => return V17,
|
"17" => return "v17",
|
||||||
_ => {}
|
_ => {}
|
||||||
},
|
},
|
||||||
_ => {}
|
_ => {}
|
||||||
@@ -282,25 +263,24 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_parse_pg_version() {
|
fn test_parse_pg_version() {
|
||||||
use super::PostgresMajorVersion::*;
|
assert_eq!(parse_pg_version("PostgreSQL 15.4"), "v15");
|
||||||
assert_eq!(parse_pg_version("PostgreSQL 15.4"), V15);
|
assert_eq!(parse_pg_version("PostgreSQL 15.14"), "v15");
|
||||||
assert_eq!(parse_pg_version("PostgreSQL 15.14"), V15);
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
parse_pg_version("PostgreSQL 15.4 (Ubuntu 15.4-0ubuntu0.23.04.1)"),
|
parse_pg_version("PostgreSQL 15.4 (Ubuntu 15.4-0ubuntu0.23.04.1)"),
|
||||||
V15
|
"v15"
|
||||||
);
|
);
|
||||||
|
|
||||||
assert_eq!(parse_pg_version("PostgreSQL 14.15"), V14);
|
assert_eq!(parse_pg_version("PostgreSQL 14.15"), "v14");
|
||||||
assert_eq!(parse_pg_version("PostgreSQL 14.0"), V14);
|
assert_eq!(parse_pg_version("PostgreSQL 14.0"), "v14");
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
parse_pg_version("PostgreSQL 14.9 (Debian 14.9-1.pgdg120+1"),
|
parse_pg_version("PostgreSQL 14.9 (Debian 14.9-1.pgdg120+1"),
|
||||||
V14
|
"v14"
|
||||||
);
|
);
|
||||||
|
|
||||||
assert_eq!(parse_pg_version("PostgreSQL 16devel"), V16);
|
assert_eq!(parse_pg_version("PostgreSQL 16devel"), "v16");
|
||||||
assert_eq!(parse_pg_version("PostgreSQL 16beta1"), V16);
|
assert_eq!(parse_pg_version("PostgreSQL 16beta1"), "v16");
|
||||||
assert_eq!(parse_pg_version("PostgreSQL 16rc2"), V16);
|
assert_eq!(parse_pg_version("PostgreSQL 16rc2"), "v16");
|
||||||
assert_eq!(parse_pg_version("PostgreSQL 16extra"), V16);
|
assert_eq!(parse_pg_version("PostgreSQL 16extra"), "v16");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
@@ -20,7 +20,6 @@ use anyhow::Result;
|
|||||||
use hyper::header::CONTENT_TYPE;
|
use hyper::header::CONTENT_TYPE;
|
||||||
use hyper::service::{make_service_fn, service_fn};
|
use hyper::service::{make_service_fn, service_fn};
|
||||||
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
||||||
use metrics::proto::MetricFamily;
|
|
||||||
use metrics::Encoder;
|
use metrics::Encoder;
|
||||||
use metrics::TextEncoder;
|
use metrics::TextEncoder;
|
||||||
use tokio::task;
|
use tokio::task;
|
||||||
@@ -73,22 +72,10 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
|
|||||||
(&Method::GET, "/metrics") => {
|
(&Method::GET, "/metrics") => {
|
||||||
debug!("serving /metrics GET request");
|
debug!("serving /metrics GET request");
|
||||||
|
|
||||||
// When we call TextEncoder::encode() below, it will immediately
|
|
||||||
// return an error if a metric family has no metrics, so we need to
|
|
||||||
// preemptively filter out metric families with no metrics.
|
|
||||||
let metrics = installed_extensions::collect()
|
|
||||||
.into_iter()
|
|
||||||
.filter(|m| !m.get_metric().is_empty())
|
|
||||||
.collect::<Vec<MetricFamily>>();
|
|
||||||
|
|
||||||
let encoder = TextEncoder::new();
|
|
||||||
let mut buffer = vec![];
|
let mut buffer = vec![];
|
||||||
|
let metrics = installed_extensions::collect();
|
||||||
if let Err(err) = encoder.encode(&metrics, &mut buffer) {
|
let encoder = TextEncoder::new();
|
||||||
let msg = format!("error handling /metrics request: {err}");
|
encoder.encode(&metrics, &mut buffer).unwrap();
|
||||||
error!(msg);
|
|
||||||
return render_json_error(&msg, StatusCode::INTERNAL_SERVER_ERROR);
|
|
||||||
}
|
|
||||||
|
|
||||||
match Response::builder()
|
match Response::builder()
|
||||||
.status(StatusCode::OK)
|
.status(StatusCode::OK)
|
||||||
@@ -295,12 +282,8 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
|
|||||||
return Response::new(Body::from(msg));
|
return Response::new(Body::from(msg));
|
||||||
}
|
}
|
||||||
|
|
||||||
let conf = compute.get_conn_conf(None);
|
let connstr = compute.connstr.clone();
|
||||||
let res =
|
let res = crate::installed_extensions::get_installed_extensions(connstr).await;
|
||||||
task::spawn_blocking(move || installed_extensions::get_installed_extensions(conf))
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
match res {
|
match res {
|
||||||
Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())),
|
Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())),
|
||||||
Err(e) => render_json_error(
|
Err(e) => render_json_error(
|
||||||
|
|||||||
@@ -2,9 +2,12 @@ use compute_api::responses::{InstalledExtension, InstalledExtensions};
|
|||||||
use metrics::proto::MetricFamily;
|
use metrics::proto::MetricFamily;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
|
use tracing::info;
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use postgres::{Client, NoTls};
|
use postgres::{Client, NoTls};
|
||||||
|
use tokio::task;
|
||||||
|
|
||||||
use metrics::core::Collector;
|
use metrics::core::Collector;
|
||||||
use metrics::{register_uint_gauge_vec, UIntGaugeVec};
|
use metrics::{register_uint_gauge_vec, UIntGaugeVec};
|
||||||
@@ -39,58 +42,80 @@ fn list_dbs(client: &mut Client) -> Result<Vec<String>> {
|
|||||||
///
|
///
|
||||||
/// Same extension can be installed in multiple databases with different versions,
|
/// Same extension can be installed in multiple databases with different versions,
|
||||||
/// we only keep the highest and lowest version across all databases.
|
/// we only keep the highest and lowest version across all databases.
|
||||||
pub fn get_installed_extensions(mut conf: postgres::config::Config) -> Result<InstalledExtensions> {
|
pub async fn get_installed_extensions(connstr: Url) -> Result<InstalledExtensions> {
|
||||||
conf.application_name("compute_ctl:get_installed_extensions");
|
let mut connstr = connstr.clone();
|
||||||
let mut client = conf.connect(NoTls)?;
|
|
||||||
|
|
||||||
let databases: Vec<String> = list_dbs(&mut client)?;
|
task::spawn_blocking(move || {
|
||||||
|
let mut client = Client::connect(connstr.as_str(), NoTls)?;
|
||||||
|
let databases: Vec<String> = list_dbs(&mut client)?;
|
||||||
|
|
||||||
let mut extensions_map: HashMap<String, InstalledExtension> = HashMap::new();
|
let mut extensions_map: HashMap<String, InstalledExtension> = HashMap::new();
|
||||||
for db in databases.iter() {
|
for db in databases.iter() {
|
||||||
conf.dbname(db);
|
connstr.set_path(db);
|
||||||
let mut db_client = conf.connect(NoTls)?;
|
let mut db_client = Client::connect(connstr.as_str(), NoTls)?;
|
||||||
let extensions: Vec<(String, String)> = db_client
|
let extensions: Vec<(String, String)> = db_client
|
||||||
.query(
|
.query(
|
||||||
"SELECT extname, extversion FROM pg_catalog.pg_extension;",
|
"SELECT extname, extversion FROM pg_catalog.pg_extension;",
|
||||||
&[],
|
&[],
|
||||||
)?
|
)?
|
||||||
.iter()
|
.iter()
|
||||||
.map(|row| (row.get("extname"), row.get("extversion")))
|
.map(|row| (row.get("extname"), row.get("extversion")))
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for (extname, v) in extensions.iter() {
|
for (extname, v) in extensions.iter() {
|
||||||
let version = v.to_string();
|
let version = v.to_string();
|
||||||
|
|
||||||
// increment the number of databases where the version of extension is installed
|
// increment the number of databases where the version of extension is installed
|
||||||
INSTALLED_EXTENSIONS
|
INSTALLED_EXTENSIONS
|
||||||
.with_label_values(&[extname, &version])
|
.with_label_values(&[extname, &version])
|
||||||
.inc();
|
.inc();
|
||||||
|
|
||||||
extensions_map
|
extensions_map
|
||||||
.entry(extname.to_string())
|
.entry(extname.to_string())
|
||||||
.and_modify(|e| {
|
.and_modify(|e| {
|
||||||
e.versions.insert(version.clone());
|
e.versions.insert(version.clone());
|
||||||
// count the number of databases where the extension is installed
|
// count the number of databases where the extension is installed
|
||||||
e.n_databases += 1;
|
e.n_databases += 1;
|
||||||
})
|
})
|
||||||
.or_insert(InstalledExtension {
|
.or_insert(InstalledExtension {
|
||||||
extname: extname.to_string(),
|
extname: extname.to_string(),
|
||||||
versions: HashSet::from([version.clone()]),
|
versions: HashSet::from([version.clone()]),
|
||||||
n_databases: 1,
|
n_databases: 1,
|
||||||
});
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
let res = InstalledExtensions {
|
let res = InstalledExtensions {
|
||||||
extensions: extensions_map.into_values().collect(),
|
extensions: extensions_map.values().cloned().collect(),
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(res)
|
Ok(res)
|
||||||
|
})
|
||||||
|
.await?
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gather info about installed extensions
|
||||||
|
pub fn get_installed_extensions_sync(connstr: Url) -> Result<()> {
|
||||||
|
let rt = tokio::runtime::Builder::new_current_thread()
|
||||||
|
.enable_all()
|
||||||
|
.build()
|
||||||
|
.expect("failed to create runtime");
|
||||||
|
let result = rt
|
||||||
|
.block_on(crate::installed_extensions::get_installed_extensions(
|
||||||
|
connstr,
|
||||||
|
))
|
||||||
|
.expect("failed to get installed extensions");
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"[NEON_EXT_STAT] {}",
|
||||||
|
serde_json::to_string(&result).expect("failed to serialize extensions list")
|
||||||
|
);
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
|
static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
|
||||||
register_uint_gauge_vec!(
|
register_uint_gauge_vec!(
|
||||||
"compute_installed_extensions",
|
"installed_extensions",
|
||||||
"Number of databases where the version of extension is installed",
|
"Number of databases where the version of extension is installed",
|
||||||
&["extension_name", "version"]
|
&["extension_name", "version"]
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -23,6 +23,5 @@ pub mod monitor;
|
|||||||
pub mod params;
|
pub mod params;
|
||||||
pub mod pg_helpers;
|
pub mod pg_helpers;
|
||||||
pub mod spec;
|
pub mod spec;
|
||||||
mod spec_apply;
|
|
||||||
pub mod swap;
|
pub mod swap;
|
||||||
pub mod sync_sk;
|
pub mod sync_sk;
|
||||||
|
|||||||
@@ -17,8 +17,11 @@ const MONITOR_CHECK_INTERVAL: Duration = Duration::from_millis(500);
|
|||||||
// should be handled gracefully.
|
// should be handled gracefully.
|
||||||
fn watch_compute_activity(compute: &ComputeNode) {
|
fn watch_compute_activity(compute: &ComputeNode) {
|
||||||
// Suppose that `connstr` doesn't change
|
// Suppose that `connstr` doesn't change
|
||||||
let connstr = compute.connstr.clone();
|
let mut connstr = compute.connstr.clone();
|
||||||
let conf = compute.get_conn_conf(Some("compute_ctl:activity_monitor"));
|
connstr
|
||||||
|
.query_pairs_mut()
|
||||||
|
.append_pair("application_name", "compute_activity_monitor");
|
||||||
|
let connstr = connstr.as_str();
|
||||||
|
|
||||||
// During startup and configuration we connect to every Postgres database,
|
// During startup and configuration we connect to every Postgres database,
|
||||||
// but we don't want to count this as some user activity. So wait until
|
// but we don't want to count this as some user activity. So wait until
|
||||||
@@ -26,7 +29,7 @@ fn watch_compute_activity(compute: &ComputeNode) {
|
|||||||
wait_for_postgres_start(compute);
|
wait_for_postgres_start(compute);
|
||||||
|
|
||||||
// Define `client` outside of the loop to reuse existing connection if it's active.
|
// Define `client` outside of the loop to reuse existing connection if it's active.
|
||||||
let mut client = conf.connect(NoTls);
|
let mut client = Client::connect(connstr, NoTls);
|
||||||
|
|
||||||
let mut sleep = false;
|
let mut sleep = false;
|
||||||
let mut prev_active_time: Option<f64> = None;
|
let mut prev_active_time: Option<f64> = None;
|
||||||
@@ -54,7 +57,7 @@ fn watch_compute_activity(compute: &ComputeNode) {
|
|||||||
info!("connection to Postgres is closed, trying to reconnect");
|
info!("connection to Postgres is closed, trying to reconnect");
|
||||||
|
|
||||||
// Connection is closed, reconnect and try again.
|
// Connection is closed, reconnect and try again.
|
||||||
client = conf.connect(NoTls);
|
client = Client::connect(connstr, NoTls);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -193,7 +196,7 @@ fn watch_compute_activity(compute: &ComputeNode) {
|
|||||||
debug!("could not connect to Postgres: {}, retrying", e);
|
debug!("could not connect to Postgres: {}, retrying", e);
|
||||||
|
|
||||||
// Establish a new connection and try again.
|
// Establish a new connection and try again.
|
||||||
client = conf.connect(NoTls);
|
client = Client::connect(connstr, NoTls);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,18 +6,15 @@ use std::io::{BufRead, BufReader};
|
|||||||
use std::os::unix::fs::PermissionsExt;
|
use std::os::unix::fs::PermissionsExt;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::process::Child;
|
use std::process::Child;
|
||||||
use std::str::FromStr;
|
|
||||||
use std::thread::JoinHandle;
|
use std::thread::JoinHandle;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
use anyhow::{bail, Result};
|
use anyhow::{bail, Result};
|
||||||
use futures::StreamExt;
|
|
||||||
use ini::Ini;
|
use ini::Ini;
|
||||||
use notify::{RecursiveMode, Watcher};
|
use notify::{RecursiveMode, Watcher};
|
||||||
use postgres::config::Config;
|
use postgres::{Client, Transaction};
|
||||||
use tokio::io::AsyncBufReadExt;
|
use tokio::io::AsyncBufReadExt;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
use tokio_postgres;
|
|
||||||
use tokio_postgres::NoTls;
|
use tokio_postgres::NoTls;
|
||||||
use tracing::{debug, error, info, instrument};
|
use tracing::{debug, error, info, instrument};
|
||||||
|
|
||||||
@@ -200,34 +197,27 @@ impl Escaping for PgIdent {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Build a list of existing Postgres roles
|
/// Build a list of existing Postgres roles
|
||||||
pub async fn get_existing_roles_async(client: &tokio_postgres::Client) -> Result<Vec<Role>> {
|
pub fn get_existing_roles(xact: &mut Transaction<'_>) -> Result<Vec<Role>> {
|
||||||
let postgres_roles = client
|
let postgres_roles = xact
|
||||||
.query_raw::<str, &String, &[String; 0]>(
|
.query("SELECT rolname, rolpassword FROM pg_catalog.pg_authid", &[])?
|
||||||
"SELECT rolname, rolpassword FROM pg_catalog.pg_authid",
|
.iter()
|
||||||
&[],
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
.filter_map(|row| async { row.ok() })
|
|
||||||
.map(|row| Role {
|
.map(|row| Role {
|
||||||
name: row.get("rolname"),
|
name: row.get("rolname"),
|
||||||
encrypted_password: row.get("rolpassword"),
|
encrypted_password: row.get("rolpassword"),
|
||||||
options: None,
|
options: None,
|
||||||
})
|
})
|
||||||
.collect()
|
.collect();
|
||||||
.await;
|
|
||||||
|
|
||||||
Ok(postgres_roles)
|
Ok(postgres_roles)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Build a list of existing Postgres databases
|
/// Build a list of existing Postgres databases
|
||||||
pub async fn get_existing_dbs_async(
|
pub fn get_existing_dbs(client: &mut Client) -> Result<HashMap<String, Database>> {
|
||||||
client: &tokio_postgres::Client,
|
|
||||||
) -> Result<HashMap<String, Database>> {
|
|
||||||
// `pg_database.datconnlimit = -2` means that the database is in the
|
// `pg_database.datconnlimit = -2` means that the database is in the
|
||||||
// invalid state. See:
|
// invalid state. See:
|
||||||
// https://github.com/postgres/postgres/commit/a4b4cc1d60f7e8ccfcc8ff8cb80c28ee411ad9a9
|
// https://github.com/postgres/postgres/commit/a4b4cc1d60f7e8ccfcc8ff8cb80c28ee411ad9a9
|
||||||
let rowstream = client
|
let postgres_dbs: Vec<Database> = client
|
||||||
.query_raw::<str, &String, &[String; 0]>(
|
.query(
|
||||||
"SELECT
|
"SELECT
|
||||||
datname AS name,
|
datname AS name,
|
||||||
datdba::regrole::text AS owner,
|
datdba::regrole::text AS owner,
|
||||||
@@ -236,11 +226,8 @@ pub async fn get_existing_dbs_async(
|
|||||||
FROM
|
FROM
|
||||||
pg_catalog.pg_database;",
|
pg_catalog.pg_database;",
|
||||||
&[],
|
&[],
|
||||||
)
|
)?
|
||||||
.await?;
|
.iter()
|
||||||
|
|
||||||
let dbs_map = rowstream
|
|
||||||
.filter_map(|r| async { r.ok() })
|
|
||||||
.map(|row| Database {
|
.map(|row| Database {
|
||||||
name: row.get("name"),
|
name: row.get("name"),
|
||||||
owner: row.get("owner"),
|
owner: row.get("owner"),
|
||||||
@@ -248,9 +235,12 @@ pub async fn get_existing_dbs_async(
|
|||||||
invalid: row.get("invalid"),
|
invalid: row.get("invalid"),
|
||||||
options: None,
|
options: None,
|
||||||
})
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let dbs_map = postgres_dbs
|
||||||
|
.iter()
|
||||||
.map(|db| (db.name.clone(), db.clone()))
|
.map(|db| (db.name.clone(), db.clone()))
|
||||||
.collect::<HashMap<_, _>>()
|
.collect::<HashMap<_, _>>();
|
||||||
.await;
|
|
||||||
|
|
||||||
Ok(dbs_map)
|
Ok(dbs_map)
|
||||||
}
|
}
|
||||||
@@ -545,11 +535,3 @@ async fn handle_postgres_logs_async(stderr: tokio::process::ChildStderr) -> Resu
|
|||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// `Postgres::config::Config` handles database names with whitespaces
|
|
||||||
/// and special characters properly.
|
|
||||||
pub fn postgres_conf_for_db(connstr: &url::Url, dbname: &str) -> Result<Config> {
|
|
||||||
let mut conf = Config::from_str(connstr.as_str())?;
|
|
||||||
conf.dbname(dbname);
|
|
||||||
Ok(conf)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,17 +1,22 @@
|
|||||||
use anyhow::{anyhow, bail, Result};
|
use std::collections::HashSet;
|
||||||
use postgres::Client;
|
|
||||||
use reqwest::StatusCode;
|
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use tracing::{error, info, instrument, warn};
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use anyhow::{anyhow, bail, Context, Result};
|
||||||
|
use postgres::config::Config;
|
||||||
|
use postgres::{Client, NoTls};
|
||||||
|
use reqwest::StatusCode;
|
||||||
|
use tracing::{error, info, info_span, instrument, span_enabled, warn, Level};
|
||||||
|
|
||||||
use crate::config;
|
use crate::config;
|
||||||
|
use crate::logger::inlinify;
|
||||||
use crate::migration::MigrationRunner;
|
use crate::migration::MigrationRunner;
|
||||||
use crate::params::PG_HBA_ALL_MD5;
|
use crate::params::PG_HBA_ALL_MD5;
|
||||||
use crate::pg_helpers::*;
|
use crate::pg_helpers::*;
|
||||||
|
|
||||||
use compute_api::responses::{ControlPlaneComputeStatus, ControlPlaneSpecResponse};
|
use compute_api::responses::{ControlPlaneComputeStatus, ControlPlaneSpecResponse};
|
||||||
use compute_api::spec::ComputeSpec;
|
use compute_api::spec::{ComputeSpec, PgIdent, Role};
|
||||||
|
|
||||||
// Do control plane request and return response if any. In case of error it
|
// Do control plane request and return response if any. In case of error it
|
||||||
// returns a bool flag indicating whether it makes sense to retry the request
|
// returns a bool flag indicating whether it makes sense to retry the request
|
||||||
@@ -146,6 +151,625 @@ pub fn add_standby_signal(pgdata_path: &Path) -> Result<()> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Compute could be unexpectedly shut down, for example, during the
|
||||||
|
/// database dropping. This leaves the database in the invalid state,
|
||||||
|
/// which prevents new db creation with the same name. This function
|
||||||
|
/// will clean it up before proceeding with catalog updates. All
|
||||||
|
/// possible future cleanup operations may go here too.
|
||||||
|
#[instrument(skip_all)]
|
||||||
|
pub fn cleanup_instance(client: &mut Client) -> Result<()> {
|
||||||
|
let existing_dbs = get_existing_dbs(client)?;
|
||||||
|
|
||||||
|
for (_, db) in existing_dbs {
|
||||||
|
if db.invalid {
|
||||||
|
// After recent commit in Postgres, interrupted DROP DATABASE
|
||||||
|
// leaves the database in the invalid state. According to the
|
||||||
|
// commit message, the only option for user is to drop it again.
|
||||||
|
// See:
|
||||||
|
// https://github.com/postgres/postgres/commit/a4b4cc1d60f7e8ccfcc8ff8cb80c28ee411ad9a9
|
||||||
|
//
|
||||||
|
// Postgres Neon extension is done the way, that db is de-registered
|
||||||
|
// in the control plane metadata only after it is dropped. So there is
|
||||||
|
// a chance that it still thinks that db should exist. This means
|
||||||
|
// that it will be re-created by `handle_databases()`. Yet, it's fine
|
||||||
|
// as user can just repeat drop (in vanilla Postgres they would need
|
||||||
|
// to do the same, btw).
|
||||||
|
let query = format!("DROP DATABASE IF EXISTS {}", db.name.pg_quote());
|
||||||
|
info!("dropping invalid database {}", db.name);
|
||||||
|
client.execute(query.as_str(), &[])?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Given a cluster spec json and open transaction it handles roles creation,
|
||||||
|
/// deletion and update.
|
||||||
|
#[instrument(skip_all)]
|
||||||
|
pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
||||||
|
let mut xact = client.transaction()?;
|
||||||
|
let existing_roles: Vec<Role> = get_existing_roles(&mut xact)?;
|
||||||
|
|
||||||
|
let mut jwks_roles = HashSet::new();
|
||||||
|
if let Some(local_proxy) = &spec.local_proxy_config {
|
||||||
|
for jwks_setting in local_proxy.jwks.iter().flatten() {
|
||||||
|
for role_name in &jwks_setting.role_names {
|
||||||
|
jwks_roles.insert(role_name.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print a list of existing Postgres roles (only in debug mode)
|
||||||
|
if span_enabled!(Level::INFO) {
|
||||||
|
let mut vec = Vec::new();
|
||||||
|
for r in &existing_roles {
|
||||||
|
vec.push(format!(
|
||||||
|
"{}:{}",
|
||||||
|
r.name,
|
||||||
|
if r.encrypted_password.is_some() {
|
||||||
|
"[FILTERED]"
|
||||||
|
} else {
|
||||||
|
"(null)"
|
||||||
|
}
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("postgres roles (total {}): {:?}", vec.len(), vec);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process delta operations first
|
||||||
|
if let Some(ops) = &spec.delta_operations {
|
||||||
|
info!("processing role renames");
|
||||||
|
for op in ops {
|
||||||
|
match op.action.as_ref() {
|
||||||
|
"delete_role" => {
|
||||||
|
// no-op now, roles will be deleted at the end of configuration
|
||||||
|
}
|
||||||
|
// Renaming role drops its password, since role name is
|
||||||
|
// used as a salt there. It is important that this role
|
||||||
|
// is recorded with a new `name` in the `roles` list.
|
||||||
|
// Follow up roles update will set the new password.
|
||||||
|
"rename_role" => {
|
||||||
|
let new_name = op.new_name.as_ref().unwrap();
|
||||||
|
|
||||||
|
// XXX: with a limited number of roles it is fine, but consider making it a HashMap
|
||||||
|
if existing_roles.iter().any(|r| r.name == op.name) {
|
||||||
|
let query: String = format!(
|
||||||
|
"ALTER ROLE {} RENAME TO {}",
|
||||||
|
op.name.pg_quote(),
|
||||||
|
new_name.pg_quote()
|
||||||
|
);
|
||||||
|
|
||||||
|
warn!("renaming role '{}' to '{}'", op.name, new_name);
|
||||||
|
xact.execute(query.as_str(), &[])?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh Postgres roles info to handle possible roles renaming
|
||||||
|
let existing_roles: Vec<Role> = get_existing_roles(&mut xact)?;
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"handling cluster spec roles (total {})",
|
||||||
|
spec.cluster.roles.len()
|
||||||
|
);
|
||||||
|
for role in &spec.cluster.roles {
|
||||||
|
let name = &role.name;
|
||||||
|
// XXX: with a limited number of roles it is fine, but consider making it a HashMap
|
||||||
|
let pg_role = existing_roles.iter().find(|r| r.name == *name);
|
||||||
|
|
||||||
|
enum RoleAction {
|
||||||
|
None,
|
||||||
|
Update,
|
||||||
|
Create,
|
||||||
|
}
|
||||||
|
let action = if let Some(r) = pg_role {
|
||||||
|
if (r.encrypted_password.is_none() && role.encrypted_password.is_some())
|
||||||
|
|| (r.encrypted_password.is_some() && role.encrypted_password.is_none())
|
||||||
|
{
|
||||||
|
RoleAction::Update
|
||||||
|
} else if let Some(pg_pwd) = &r.encrypted_password {
|
||||||
|
// Check whether password changed or not (trim 'md5' prefix first if any)
|
||||||
|
//
|
||||||
|
// This is a backward compatibility hack, which comes from the times when we were using
|
||||||
|
// md5 for everyone and hashes were stored in the console db without md5 prefix. So when
|
||||||
|
// role comes from the control-plane (json spec) `Role.encrypted_password` doesn't have md5 prefix,
|
||||||
|
// but when role comes from Postgres (`get_existing_roles` / `existing_roles`) it has this prefix.
|
||||||
|
// Here is the only place so far where we compare hashes, so it seems to be the best candidate
|
||||||
|
// to place this compatibility layer.
|
||||||
|
let pg_pwd = if let Some(stripped) = pg_pwd.strip_prefix("md5") {
|
||||||
|
stripped
|
||||||
|
} else {
|
||||||
|
pg_pwd
|
||||||
|
};
|
||||||
|
if pg_pwd != *role.encrypted_password.as_ref().unwrap() {
|
||||||
|
RoleAction::Update
|
||||||
|
} else {
|
||||||
|
RoleAction::None
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
RoleAction::None
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
RoleAction::Create
|
||||||
|
};
|
||||||
|
|
||||||
|
match action {
|
||||||
|
RoleAction::None => {}
|
||||||
|
RoleAction::Update => {
|
||||||
|
// This can be run on /every/ role! Not just ones created through the console.
|
||||||
|
// This means that if you add some funny ALTER here that adds a permission,
|
||||||
|
// this will get run even on user-created roles! This will result in different
|
||||||
|
// behavior before and after a spec gets reapplied. The below ALTER as it stands
|
||||||
|
// now only grants LOGIN and changes the password. Please do not allow this branch
|
||||||
|
// to do anything silly.
|
||||||
|
let mut query: String = format!("ALTER ROLE {} ", name.pg_quote());
|
||||||
|
query.push_str(&role.to_pg_options());
|
||||||
|
xact.execute(query.as_str(), &[])?;
|
||||||
|
}
|
||||||
|
RoleAction::Create => {
|
||||||
|
// This branch only runs when roles are created through the console, so it is
|
||||||
|
// safe to add more permissions here. BYPASSRLS and REPLICATION are inherited
|
||||||
|
// from neon_superuser.
|
||||||
|
let mut query: String = format!(
|
||||||
|
"CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS REPLICATION IN ROLE neon_superuser",
|
||||||
|
name.pg_quote()
|
||||||
|
);
|
||||||
|
if jwks_roles.contains(name.as_str()) {
|
||||||
|
query = format!("CREATE ROLE {}", name.pg_quote());
|
||||||
|
}
|
||||||
|
info!("running role create query: '{}'", &query);
|
||||||
|
query.push_str(&role.to_pg_options());
|
||||||
|
xact.execute(query.as_str(), &[])?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if span_enabled!(Level::INFO) {
|
||||||
|
let pwd = if role.encrypted_password.is_some() {
|
||||||
|
"[FILTERED]"
|
||||||
|
} else {
|
||||||
|
"(null)"
|
||||||
|
};
|
||||||
|
let action_str = match action {
|
||||||
|
RoleAction::None => "",
|
||||||
|
RoleAction::Create => " -> create",
|
||||||
|
RoleAction::Update => " -> update",
|
||||||
|
};
|
||||||
|
info!(" - {}:{}{}", name, pwd, action_str);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
xact.commit()?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}

/// Reassign all dependent objects and delete requested roles.
#[instrument(skip_all)]
pub fn handle_role_deletions(spec: &ComputeSpec, connstr: &str, client: &mut Client) -> Result<()> {
    if let Some(ops) = &spec.delta_operations {
        // First, reassign all dependent objects to db owners.
        info!("reassigning dependent objects of to-be-deleted roles");

        // Fetch existing roles. We could've exported and used `existing_roles` from
        // `handle_roles()`, but we only make this list there before creating new roles.
        // Which is probably fine as we never create to-be-deleted roles, but that'd
        // just look a bit untidy. Anyway, the entire `pg_roles` should be in shared
        // buffers already, so this shouldn't be a big deal.
        let mut xact = client.transaction()?;
        let existing_roles: Vec<Role> = get_existing_roles(&mut xact)?;
        xact.commit()?;

        for op in ops {
            // Check that the role is still present in Postgres, as this could be a
            // restart with the same spec after role deletion.
            if op.action == "delete_role" && existing_roles.iter().any(|r| r.name == op.name) {
                reassign_owned_objects(spec, connstr, &op.name)?;
            }
        }

        // Second, proceed with role deletions.
        info!("processing role deletions");
        let mut xact = client.transaction()?;
        for op in ops {
            // We do not check whether the role exists or not;
            // Postgres will take care of it for us.
            if op.action == "delete_role" {
                let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.pg_quote());

                warn!("deleting role '{}'", &op.name);
                xact.execute(query.as_str(), &[])?;
            }
        }
        xact.commit()?;
    }

    Ok(())
}
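
// Sketch of the input this function consumes (field values are assumptions for
// the example): a `delete_role` entry in `spec.delta_operations` such as
// `{ "action": "delete_role", "name": "old_user" }` makes the first loop run the
// REASSIGN OWNED / DROP OWNED pass in every database, and the second loop issue
// `DROP ROLE IF EXISTS "old_user"` inside a single transaction.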

fn reassign_owned_objects_in_one_db(
    conf: Config,
    role_name: &PgIdent,
    db_owner: &PgIdent,
) -> Result<()> {
    let mut client = conf.connect(NoTls)?;

    // This will reassign all dependent objects to the db owner
    let reassign_query = format!(
        "REASSIGN OWNED BY {} TO {}",
        role_name.pg_quote(),
        db_owner.pg_quote()
    );
    info!(
        "reassigning objects owned by '{}' in db '{}' to '{}'",
        role_name,
        conf.get_dbname().unwrap_or(""),
        db_owner
    );
    client.simple_query(&reassign_query)?;

    // This now will only drop privileges of the role
    let drop_query = format!("DROP OWNED BY {}", role_name.pg_quote());
    client.simple_query(&drop_query)?;
    Ok(())
}
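
// Example of the two statements generated above for a hypothetical role and
// database owner (the names are assumptions, not from the original source):
//
//     REASSIGN OWNED BY "old_user" TO "db_owner";
//     DROP OWNED BY "old_user";
//
// REASSIGN moves ownership of the role's objects first, so the following
// DROP OWNED only revokes the privileges still attached to the role.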

// Reassign all owned objects in all databases to the owner of the database.
fn reassign_owned_objects(spec: &ComputeSpec, connstr: &str, role_name: &PgIdent) -> Result<()> {
    for db in &spec.cluster.databases {
        if db.owner != *role_name {
            let mut conf = Config::from_str(connstr)?;
            conf.dbname(&db.name);
            reassign_owned_objects_in_one_db(conf, role_name, &db.owner)?;
        }
    }

    // Also handle the case when there are no databases in the spec.
    // In this case we need to reassign objects in the default database.
    let conf = Config::from_str(connstr)?;
    let db_owner = PgIdent::from_str("cloud_admin")?;
    reassign_owned_objects_in_one_db(conf, role_name, &db_owner)?;

    Ok(())
}
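
// Minimal sketch of the per-database connection setup used above (the connection
// string and database name are assumptions for the example):
//
//     let mut conf = Config::from_str("host=127.0.0.1 user=cloud_admin")?;
//     conf.dbname("neondb");
//     let mut client = conf.connect(NoTls)?;
//
// i.e. the same admin connection string is reused, with only `dbname` swapped
// per target database.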

/// It follows mostly the same logic as `handle_roles()`, except that it
/// does not use an explicit transaction block, since major database operations
/// like `CREATE DATABASE` and `DROP DATABASE` do not support it. Statement-level
/// atomicity should be enough here due to the order of operations and various checks,
/// which together provide us idempotency.
#[instrument(skip_all)]
|
||||||
|
pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
||||||
|
let existing_dbs = get_existing_dbs(client)?;
|
||||||
|
|
||||||
|
// Print a list of existing Postgres databases (only in debug mode)
|
||||||
|
if span_enabled!(Level::INFO) {
|
||||||
|
let mut vec = Vec::new();
|
||||||
|
for (dbname, db) in &existing_dbs {
|
||||||
|
vec.push(format!("{}:{}", dbname, db.owner));
|
||||||
|
}
|
||||||
|
info!("postgres databases (total {}): {:?}", vec.len(), vec);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process delta operations first
|
||||||
|
if let Some(ops) = &spec.delta_operations {
|
||||||
|
info!("processing delta operations on databases");
|
||||||
|
for op in ops {
|
||||||
|
match op.action.as_ref() {
|
||||||
|
// We do not check whether the DB exists or not,
// Postgres will take care of it for us
|
||||||
|
"delete_db" => {
|
||||||
|
// In Postgres we can't drop a database if it is a template.
|
||||||
|
// So we need to unset the template flag first, but it could
|
||||||
|
// be a retry, so we could've already dropped the database.
|
||||||
|
// Check that database exists first to make it idempotent.
|
||||||
|
let unset_template_query: String = format!(
|
||||||
|
"
|
||||||
|
DO $$
|
||||||
|
BEGIN
|
||||||
|
IF EXISTS(
|
||||||
|
SELECT 1
|
||||||
|
FROM pg_catalog.pg_database
|
||||||
|
WHERE datname = {}
|
||||||
|
)
|
||||||
|
THEN
|
||||||
|
ALTER DATABASE {} is_template false;
|
||||||
|
END IF;
|
||||||
|
END
|
||||||
|
$$;",
|
||||||
|
escape_literal(&op.name),
|
||||||
|
&op.name.pg_quote()
|
||||||
|
);
|
||||||
|
// Use FORCE to drop database even if there are active connections.
|
||||||
|
// We run this from `cloud_admin`, so it should have enough privileges.
|
||||||
|
// NB: there could be other db states, which prevent us from dropping
|
||||||
|
// the database. For example, if db is used by any active subscription
|
||||||
|
// or replication slot.
|
||||||
|
// TODO: deal with it once we allow logical replication. Proper fix should
|
||||||
|
// involve returning an error code to the control plane, so it could
|
||||||
|
// figure out that this is a non-retryable error, return it to the user
|
||||||
|
// and fail operation permanently.
|
||||||
|
let drop_db_query: String = format!(
|
||||||
|
"DROP DATABASE IF EXISTS {} WITH (FORCE)",
|
||||||
|
&op.name.pg_quote()
|
||||||
|
);
|
||||||
|
|
||||||
|
warn!("deleting database '{}'", &op.name);
|
||||||
|
client.execute(unset_template_query.as_str(), &[])?;
|
||||||
|
client.execute(drop_db_query.as_str(), &[])?;
|
||||||
|
}
|
||||||
|
"rename_db" => {
|
||||||
|
let new_name = op.new_name.as_ref().unwrap();
|
||||||
|
|
||||||
|
if existing_dbs.contains_key(&op.name) {
|
||||||
|
let query: String = format!(
|
||||||
|
"ALTER DATABASE {} RENAME TO {}",
|
||||||
|
op.name.pg_quote(),
|
||||||
|
new_name.pg_quote()
|
||||||
|
);
|
||||||
|
|
||||||
|
warn!("renaming database '{}' to '{}'", op.name, new_name);
|
||||||
|
client.execute(query.as_str(), &[])?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh Postgres databases info to handle possible renames
|
||||||
|
let existing_dbs = get_existing_dbs(client)?;
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"handling cluster spec databases (total {})",
|
||||||
|
spec.cluster.databases.len()
|
||||||
|
);
|
||||||
|
for db in &spec.cluster.databases {
|
||||||
|
let name = &db.name;
|
||||||
|
let pg_db = existing_dbs.get(name);
|
||||||
|
|
||||||
|
enum DatabaseAction {
|
||||||
|
None,
|
||||||
|
Update,
|
||||||
|
Create,
|
||||||
|
}
|
||||||
|
let action = if let Some(r) = pg_db {
|
||||||
|
// XXX: db owner name is returned as quoted string from Postgres,
|
||||||
|
// when quoting is needed.
|
||||||
|
let new_owner = if r.owner.starts_with('"') {
|
||||||
|
db.owner.pg_quote()
|
||||||
|
} else {
|
||||||
|
db.owner.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
if new_owner != r.owner {
|
||||||
|
// Update the owner
|
||||||
|
DatabaseAction::Update
|
||||||
|
} else {
|
||||||
|
DatabaseAction::None
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
DatabaseAction::Create
|
||||||
|
};
|
||||||
|
|
||||||
|
match action {
|
||||||
|
DatabaseAction::None => {}
|
||||||
|
DatabaseAction::Update => {
|
||||||
|
let query: String = format!(
|
||||||
|
"ALTER DATABASE {} OWNER TO {}",
|
||||||
|
name.pg_quote(),
|
||||||
|
db.owner.pg_quote()
|
||||||
|
);
|
||||||
|
let _guard = info_span!("executing", query).entered();
|
||||||
|
client.execute(query.as_str(), &[])?;
|
||||||
|
}
|
||||||
|
DatabaseAction::Create => {
|
||||||
|
let mut query: String = format!("CREATE DATABASE {} ", name.pg_quote());
|
||||||
|
query.push_str(&db.to_pg_options());
|
||||||
|
let _guard = info_span!("executing", query).entered();
|
||||||
|
client.execute(query.as_str(), &[])?;
|
||||||
|
let grant_query: String = format!(
|
||||||
|
"GRANT ALL PRIVILEGES ON DATABASE {} TO neon_superuser",
|
||||||
|
name.pg_quote()
|
||||||
|
);
|
||||||
|
client.execute(grant_query.as_str(), &[])?;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if span_enabled!(Level::INFO) {
|
||||||
|
let action_str = match action {
|
||||||
|
DatabaseAction::None => "",
|
||||||
|
DatabaseAction::Create => " -> create",
|
||||||
|
DatabaseAction::Update => " -> update",
|
||||||
|
};
|
||||||
|
info!(" - {}:{}{}", db.name, db.owner, action_str);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
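
// Sketch of the delta operations handled above (database names are assumptions
// for the example, not from the original source):
//
//     { "action": "delete_db", "name": "old_db" }
//     { "action": "rename_db", "name": "old_db", "new_name": "new_db" }
//
// `delete_db` first clears the template flag (only if the database still exists)
// and then runs `DROP DATABASE IF EXISTS "old_db" WITH (FORCE)`; `rename_db`
// only runs `ALTER DATABASE ... RENAME TO ...` when the source database is
// present, so a retried spec apply stays idempotent.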
|
||||||
|
|
||||||
|
/// Grant CREATE ON DATABASE to the database owner and do some other alters and grants
|
||||||
|
/// to allow users creating trusted extensions and re-creating `public` schema, for example.
|
||||||
|
#[instrument(skip_all)]
|
||||||
|
pub fn handle_grants(
|
||||||
|
spec: &ComputeSpec,
|
||||||
|
client: &mut Client,
|
||||||
|
connstr: &str,
|
||||||
|
enable_anon_extension: bool,
|
||||||
|
) -> Result<()> {
|
||||||
|
info!("modifying database permissions");
|
||||||
|
let existing_dbs = get_existing_dbs(client)?;
|
||||||
|
|
||||||
|
// Do some per-database access adjustments. We'd better do this at db creation time,
|
||||||
|
// but CREATE DATABASE isn't transactional. So we cannot create db + do some grants
|
||||||
|
// atomically.
|
||||||
|
for db in &spec.cluster.databases {
|
||||||
|
match existing_dbs.get(&db.name) {
|
||||||
|
Some(pg_db) => {
|
||||||
|
if pg_db.restrict_conn || pg_db.invalid {
|
||||||
|
info!(
|
||||||
|
"skipping grants for db {} (invalid: {}, connections not allowed: {})",
|
||||||
|
db.name, pg_db.invalid, pg_db.restrict_conn
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
bail!(
|
||||||
|
"database {} doesn't exist in Postgres after handle_databases()",
|
||||||
|
db.name
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut conf = Config::from_str(connstr)?;
|
||||||
|
conf.dbname(&db.name);
|
||||||
|
|
||||||
|
let mut db_client = conf.connect(NoTls)?;
|
||||||
|
|
||||||
|
// This will only change ownership on the schema itself, not the objects
|
||||||
|
// inside it. Without it, the owner of the `public` schema will be `cloud_admin`
// and the database owner cannot do anything with it. The SQL procedure ensures
// that it won't error out if schema `public` doesn't exist.
|
||||||
|
let alter_query = format!(
|
||||||
|
"DO $$\n\
|
||||||
|
DECLARE\n\
|
||||||
|
schema_owner TEXT;\n\
|
||||||
|
BEGIN\n\
|
||||||
|
IF EXISTS(\n\
|
||||||
|
SELECT nspname\n\
|
||||||
|
FROM pg_catalog.pg_namespace\n\
|
||||||
|
WHERE nspname = 'public'\n\
|
||||||
|
)\n\
|
||||||
|
THEN\n\
|
||||||
|
SELECT nspowner::regrole::text\n\
|
||||||
|
FROM pg_catalog.pg_namespace\n\
|
||||||
|
WHERE nspname = 'public'\n\
|
||||||
|
INTO schema_owner;\n\
|
||||||
|
\n\
|
||||||
|
IF schema_owner = 'cloud_admin' OR schema_owner = 'zenith_admin'\n\
|
||||||
|
THEN\n\
|
||||||
|
ALTER SCHEMA public OWNER TO {};\n\
|
||||||
|
END IF;\n\
|
||||||
|
END IF;\n\
|
||||||
|
END\n\
|
||||||
|
$$;",
|
||||||
|
db.owner.pg_quote()
|
||||||
|
);
|
||||||
|
db_client.simple_query(&alter_query)?;
|
||||||
|
|
||||||
|
// Explicitly grant CREATE ON SCHEMA PUBLIC to the web_access user.
|
||||||
|
// This is needed because since postgres 15 this privilege is removed by default.
|
||||||
|
// TODO: web_access hasn't been created for almost a year. There could still be
// active users of year-old projects, but hopefully not, so check it and
// remove this code if possible. The worst thing that could happen is that
// a user won't be able to use the public schema in NEW databases created in a
// very OLD project.
|
||||||
|
//
|
||||||
|
// Also, alter default permissions so that relations created by extensions can be
|
||||||
|
// used by neon_superuser without permission issues.
|
||||||
|
let grant_query = "DO $$\n\
|
||||||
|
BEGIN\n\
|
||||||
|
IF EXISTS(\n\
|
||||||
|
SELECT nspname\n\
|
||||||
|
FROM pg_catalog.pg_namespace\n\
|
||||||
|
WHERE nspname = 'public'\n\
|
||||||
|
) AND\n\
|
||||||
|
current_setting('server_version_num')::int/10000 >= 15\n\
|
||||||
|
THEN\n\
|
||||||
|
IF EXISTS(\n\
|
||||||
|
SELECT rolname\n\
|
||||||
|
FROM pg_catalog.pg_roles\n\
|
||||||
|
WHERE rolname = 'web_access'\n\
|
||||||
|
)\n\
|
||||||
|
THEN\n\
|
||||||
|
GRANT CREATE ON SCHEMA public TO web_access;\n\
|
||||||
|
END IF;\n\
|
||||||
|
END IF;\n\
|
||||||
|
IF EXISTS(\n\
|
||||||
|
SELECT nspname\n\
|
||||||
|
FROM pg_catalog.pg_namespace\n\
|
||||||
|
WHERE nspname = 'public'\n\
|
||||||
|
)\n\
|
||||||
|
THEN\n\
|
||||||
|
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO neon_superuser WITH GRANT OPTION;\n\
|
||||||
|
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO neon_superuser WITH GRANT OPTION;\n\
|
||||||
|
END IF;\n\
|
||||||
|
END\n\
|
||||||
|
$$;"
|
||||||
|
.to_string();
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"grant query for db {} : {}",
|
||||||
|
&db.name,
|
||||||
|
inlinify(&grant_query)
|
||||||
|
);
|
||||||
|
db_client.simple_query(&grant_query)?;
|
||||||
|
|
||||||
|
// it is important to run this after all grants
|
||||||
|
if enable_anon_extension {
|
||||||
|
handle_extension_anon(spec, &db.owner, &mut db_client, false)
|
||||||
|
.context("handle_grants handle_extension_anon")?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
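
// Rough SQL equivalent of the per-database grant flow above (the existence
// checks from the DO blocks are omitted; `{owner}` is a placeholder, not
// original code):
//
//     ALTER SCHEMA public OWNER TO {owner};          -- only if owned by cloud_admin/zenith_admin
//     GRANT CREATE ON SCHEMA public TO web_access;   -- only on PG >= 15 and if the role exists
//     ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO neon_superuser WITH GRANT OPTION;
//     ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO neon_superuser WITH GRANT OPTION;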

/// Create required system extensions
#[instrument(skip_all)]
pub fn handle_extensions(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
    if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
        if libs.contains("pg_stat_statements") {
            // Create extension only if this compute really needs it
            let query = "CREATE EXTENSION IF NOT EXISTS pg_stat_statements";
            info!("creating system extensions with query: {}", query);
            client.simple_query(query)?;
        }
    }

    Ok(())
}
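
// Example spec fragment that triggers the branch above (the value is an
// assumption): a cluster setting `shared_preload_libraries = 'neon,pg_stat_statements'`
// makes `settings.find(...)` return the value, `contains("pg_stat_statements")`
// match, and the compute run `CREATE EXTENSION IF NOT EXISTS pg_stat_statements`.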

/// Run CREATE and ALTER EXTENSION neon UPDATE for postgres database
#[instrument(skip_all)]
pub fn handle_extension_neon(client: &mut Client) -> Result<()> {
    info!("handle extension neon");

    let mut query = "CREATE SCHEMA IF NOT EXISTS neon";
    client.simple_query(query)?;

    query = "CREATE EXTENSION IF NOT EXISTS neon WITH SCHEMA neon";
    info!("create neon extension with query: {}", query);
    client.simple_query(query)?;

    query = "UPDATE pg_extension SET extrelocatable = true WHERE extname = 'neon'";
    client.simple_query(query)?;

    query = "ALTER EXTENSION neon SET SCHEMA neon";
    info!("alter neon extension schema with query: {}", query);
    client.simple_query(query)?;

    // this will be a no-op if extension is already up to date,
    // which may happen in two cases:
    // - extension was just installed
    // - extension was already installed and is up to date
    let query = "ALTER EXTENSION neon UPDATE";
    info!("update neon extension version with query: {}", query);
    if let Err(e) = client.simple_query(query) {
        error!(
            "failed to upgrade neon extension during `handle_extension_neon`: {}",
            e
        );
    }

    Ok(())
}
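
// Summary of the statements the function above already issues, in order (no new
// behaviour, just the queries collected in one place):
//
//     CREATE SCHEMA IF NOT EXISTS neon;
//     CREATE EXTENSION IF NOT EXISTS neon WITH SCHEMA neon;
//     UPDATE pg_extension SET extrelocatable = true WHERE extname = 'neon';
//     ALTER EXTENSION neon SET SCHEMA neon;
//     ALTER EXTENSION neon UPDATE;  -- a failure here is logged, not fatal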

#[instrument(skip_all)]
pub fn handle_neon_extension_upgrade(client: &mut Client) -> Result<()> {
    info!("handle neon extension upgrade");
|
||||||
|
|||||||
@@ -1,680 +0,0 @@
|
|||||||
use std::collections::{HashMap, HashSet};
|
|
||||||
use std::fmt::{Debug, Formatter};
|
|
||||||
use std::future::Future;
|
|
||||||
use std::iter::empty;
|
|
||||||
use std::iter::once;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use crate::compute::construct_superuser_query;
|
|
||||||
use crate::pg_helpers::{escape_literal, DatabaseExt, Escaping, GenericOptionsSearch, RoleExt};
|
|
||||||
use anyhow::{bail, Result};
|
|
||||||
use compute_api::spec::{ComputeFeature, ComputeSpec, Database, PgIdent, Role};
|
|
||||||
use futures::future::join_all;
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
use tokio_postgres::Client;
|
|
||||||
use tracing::{debug, info_span, Instrument};
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub enum DB {
|
|
||||||
SystemDB,
|
|
||||||
UserDB(Database),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DB {
|
|
||||||
pub fn new(db: Database) -> DB {
|
|
||||||
Self::UserDB(db)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn is_owned_by(&self, role: &PgIdent) -> bool {
|
|
||||||
match self {
|
|
||||||
DB::SystemDB => false,
|
|
||||||
DB::UserDB(db) => &db.owner == role,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Debug for DB {
|
|
||||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
|
||||||
match self {
|
|
||||||
DB::SystemDB => f.debug_tuple("SystemDB").finish(),
|
|
||||||
DB::UserDB(db) => f.debug_tuple("UserDB").field(&db.name).finish(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Copy, Clone, Debug)]
|
|
||||||
pub enum PerDatabasePhase {
|
|
||||||
DeleteDBRoleReferences,
|
|
||||||
ChangeSchemaPerms,
|
|
||||||
HandleAnonExtension,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub enum ApplySpecPhase {
|
|
||||||
CreateSuperUser,
|
|
||||||
DropInvalidDatabases,
|
|
||||||
RenameRoles,
|
|
||||||
CreateAndAlterRoles,
|
|
||||||
RenameAndDeleteDatabases,
|
|
||||||
CreateAndAlterDatabases,
|
|
||||||
RunInEachDatabase { db: DB, subphase: PerDatabasePhase },
|
|
||||||
HandleOtherExtensions,
|
|
||||||
HandleNeonExtension,
|
|
||||||
CreateAvailabilityCheck,
|
|
||||||
DropRoles,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct Operation {
|
|
||||||
pub query: String,
|
|
||||||
pub comment: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct MutableApplyContext {
|
|
||||||
pub roles: HashMap<String, Role>,
|
|
||||||
pub dbs: HashMap<String, Database>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Apply the operations that belong to the given spec apply phase.
|
|
||||||
///
|
|
||||||
/// Commands within a single phase are executed in order of Iterator yield.
|
|
||||||
/// Commands of ApplySpecPhase::RunInEachDatabase will execute in the database
|
|
||||||
/// indicated by its `db` field, and can share a single client for all changes
|
|
||||||
/// to that database.
|
|
||||||
///
|
|
||||||
/// Notes:
|
|
||||||
/// - Commands are pipelined, and thus may cause incomplete apply if one
|
|
||||||
/// command of many fails.
|
|
||||||
/// - Failing commands will fail the phase's apply step once the return value
|
|
||||||
/// is processed.
|
|
||||||
/// - No timeouts have (yet) been implemented.
|
|
||||||
/// - The caller is responsible for limiting and/or applying concurrency.
|
|
||||||
pub async fn apply_operations<'a, Fut, F>(
|
|
||||||
spec: Arc<ComputeSpec>,
|
|
||||||
ctx: Arc<RwLock<MutableApplyContext>>,
|
|
||||||
jwks_roles: Arc<HashSet<String>>,
|
|
||||||
apply_spec_phase: ApplySpecPhase,
|
|
||||||
client: F,
|
|
||||||
) -> Result<()>
|
|
||||||
where
|
|
||||||
F: FnOnce() -> Fut,
|
|
||||||
Fut: Future<Output = Result<&'a Client>>,
|
|
||||||
{
|
|
||||||
debug!("Starting phase {:?}", &apply_spec_phase);
|
|
||||||
let span = info_span!("db_apply_changes", phase=?apply_spec_phase);
|
|
||||||
let span2 = span.clone();
|
|
||||||
async move {
|
|
||||||
debug!("Processing phase {:?}", &apply_spec_phase);
|
|
||||||
let ctx = ctx;
|
|
||||||
|
|
||||||
let mut ops = get_operations(&spec, &ctx, &jwks_roles, &apply_spec_phase)
|
|
||||||
.await?
|
|
||||||
.peekable();
|
|
||||||
|
|
||||||
// Return (and by doing so, skip requesting the PostgreSQL client) if
|
|
||||||
// we don't have any operations scheduled.
|
|
||||||
if ops.peek().is_none() {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let client = client().await?;
|
|
||||||
|
|
||||||
debug!("Applying phase {:?}", &apply_spec_phase);
|
|
||||||
|
|
||||||
let active_queries = ops
|
|
||||||
.map(|op| {
|
|
||||||
let Operation { comment, query } = op;
|
|
||||||
let inspan = match comment {
|
|
||||||
None => span.clone(),
|
|
||||||
Some(comment) => info_span!("phase {}: {}", comment),
|
|
||||||
};
|
|
||||||
|
|
||||||
async {
|
|
||||||
let query = query;
|
|
||||||
let res = client.simple_query(&query).await;
|
|
||||||
debug!(
|
|
||||||
"{} {}",
|
|
||||||
if res.is_ok() {
|
|
||||||
"successfully executed"
|
|
||||||
} else {
|
|
||||||
"failed to execute"
|
|
||||||
},
|
|
||||||
query
|
|
||||||
);
|
|
||||||
res
|
|
||||||
}
|
|
||||||
.instrument(inspan)
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
drop(ctx);
|
|
||||||
|
|
||||||
for it in join_all(active_queries).await {
|
|
||||||
drop(it?);
|
|
||||||
}
|
|
||||||
|
|
||||||
debug!("Completed phase {:?}", &apply_spec_phase);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
.instrument(span2)
|
|
||||||
.await
|
|
||||||
}
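
// Hedged usage sketch (the surrounding setup is an assumption; the real caller
// lives outside this file): one phase is applied at a time, and the client is
// only requested lazily when the phase produced operations, e.g.
//
//     apply_operations(
//         spec.clone(),
//         ctx.clone(),
//         jwks_roles.clone(),
//         ApplySpecPhase::CreateAndAlterRoles,
//         || async { Ok(&client) },
//     )
//     .await?;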
|
|
||||||
|
|
||||||
/// Create a stream of operations to be executed for that phase of applying
|
|
||||||
/// changes.
|
|
||||||
///
|
|
||||||
/// In the future we may generate a single stream of changes and then
|
|
||||||
/// sort/merge/batch execution, but for now this is a nice way to improve
|
|
||||||
/// batching behaviour of the commands.
|
|
||||||
async fn get_operations<'a>(
|
|
||||||
spec: &'a ComputeSpec,
|
|
||||||
ctx: &'a RwLock<MutableApplyContext>,
|
|
||||||
jwks_roles: &'a HashSet<String>,
|
|
||||||
apply_spec_phase: &'a ApplySpecPhase,
|
|
||||||
) -> Result<Box<dyn Iterator<Item = Operation> + 'a + Send>> {
|
|
||||||
match apply_spec_phase {
|
|
||||||
ApplySpecPhase::CreateSuperUser => {
|
|
||||||
let query = construct_superuser_query(spec);
|
|
||||||
|
|
||||||
Ok(Box::new(once(Operation {
|
|
||||||
query,
|
|
||||||
comment: None,
|
|
||||||
})))
|
|
||||||
}
|
|
||||||
ApplySpecPhase::DropInvalidDatabases => {
|
|
||||||
let mut ctx = ctx.write().await;
|
|
||||||
let databases = &mut ctx.dbs;
|
|
||||||
|
|
||||||
let keys: Vec<_> = databases
|
|
||||||
.iter()
|
|
||||||
.filter(|(_, db)| db.invalid)
|
|
||||||
.map(|(dbname, _)| dbname.clone())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
// After recent commit in Postgres, interrupted DROP DATABASE
|
|
||||||
// leaves the database in the invalid state. According to the
|
|
||||||
// commit message, the only option for user is to drop it again.
|
|
||||||
// See:
|
|
||||||
// https://github.com/postgres/postgres/commit/a4b4cc1d60f7e8ccfcc8ff8cb80c28ee411ad9a9
|
|
||||||
//
|
|
||||||
// The Postgres Neon extension is set up in such a way that the db is de-registered
// in the control plane metadata only after it is dropped. So there is
// a chance that the control plane still thinks that the db should exist. This means
// that it will be re-created by the `CreateDatabases` phase. This
// is fine, as the user can just drop the database again (in vanilla
// Postgres they would need to do the same).
|
|
||||||
let operations = keys
|
|
||||||
.into_iter()
|
|
||||||
.filter_map(move |dbname| ctx.dbs.remove(&dbname))
|
|
||||||
.map(|db| Operation {
|
|
||||||
query: format!("DROP DATABASE IF EXISTS {}", db.name.pg_quote()),
|
|
||||||
comment: Some(format!("Dropping invalid database {}", db.name)),
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
ApplySpecPhase::RenameRoles => {
|
|
||||||
let mut ctx = ctx.write().await;
|
|
||||||
|
|
||||||
let operations = spec
|
|
||||||
.delta_operations
|
|
||||||
.iter()
|
|
||||||
.flatten()
|
|
||||||
.filter(|op| op.action == "rename_role")
|
|
||||||
.filter_map(move |op| {
|
|
||||||
let roles = &mut ctx.roles;
|
|
||||||
|
|
||||||
if roles.contains_key(op.name.as_str()) {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
let new_name = op.new_name.as_ref().unwrap();
|
|
||||||
let mut role = roles.remove(op.name.as_str()).unwrap();
|
|
||||||
|
|
||||||
role.name = new_name.clone();
|
|
||||||
role.encrypted_password = None;
|
|
||||||
roles.insert(role.name.clone(), role);
|
|
||||||
|
|
||||||
Some(Operation {
|
|
||||||
query: format!(
|
|
||||||
"ALTER ROLE {} RENAME TO {}",
|
|
||||||
op.name.pg_quote(),
|
|
||||||
new_name.pg_quote()
|
|
||||||
),
|
|
||||||
comment: Some(format!("renaming role '{}' to '{}'", op.name, new_name)),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
ApplySpecPhase::CreateAndAlterRoles => {
|
|
||||||
let mut ctx = ctx.write().await;
|
|
||||||
|
|
||||||
let operations = spec.cluster.roles
|
|
||||||
.iter()
|
|
||||||
.filter_map(move |role| {
|
|
||||||
let roles = &mut ctx.roles;
|
|
||||||
let db_role = roles.get(&role.name);
|
|
||||||
|
|
||||||
match db_role {
|
|
||||||
Some(db_role) => {
|
|
||||||
if db_role.encrypted_password != role.encrypted_password {
|
|
||||||
// This can be run on /every/ role! Not just ones created through the console.
|
|
||||||
// This means that if you add some funny ALTER here that adds a permission,
|
|
||||||
// this will get run even on user-created roles! This will result in different
|
|
||||||
// behavior before and after a spec gets reapplied. The below ALTER as it stands
|
|
||||||
// now only grants LOGIN and changes the password. Please do not allow this branch
|
|
||||||
// to do anything silly.
|
|
||||||
Some(Operation {
|
|
||||||
query: format!(
|
|
||||||
"ALTER ROLE {} {}",
|
|
||||||
role.name.pg_quote(),
|
|
||||||
role.to_pg_options(),
|
|
||||||
),
|
|
||||||
comment: None,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
let query = if !jwks_roles.contains(role.name.as_str()) {
|
|
||||||
format!(
|
|
||||||
"CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS REPLICATION IN ROLE neon_superuser {}",
|
|
||||||
role.name.pg_quote(),
|
|
||||||
role.to_pg_options(),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
format!(
|
|
||||||
"CREATE ROLE {} {}",
|
|
||||||
role.name.pg_quote(),
|
|
||||||
role.to_pg_options(),
|
|
||||||
)
|
|
||||||
};
|
|
||||||
Some(Operation {
|
|
||||||
query,
|
|
||||||
comment: Some(format!("creating role {}", role.name)),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
ApplySpecPhase::RenameAndDeleteDatabases => {
|
|
||||||
let mut ctx = ctx.write().await;
|
|
||||||
|
|
||||||
let operations = spec
|
|
||||||
.delta_operations
|
|
||||||
.iter()
|
|
||||||
.flatten()
|
|
||||||
.filter_map(move |op| {
|
|
||||||
let databases = &mut ctx.dbs;
|
|
||||||
match op.action.as_str() {
|
|
||||||
// We do not check whether the DB exists or not,
|
|
||||||
// Postgres will take care of it for us
|
|
||||||
"delete_db" => {
|
|
||||||
// In Postgres we can't drop a database if it is a template.
|
|
||||||
// So we need to unset the template flag first, but it could
|
|
||||||
// be a retry, so we could've already dropped the database.
|
|
||||||
// Check that database exists first to make it idempotent.
|
|
||||||
let unset_template_query: String = format!(
|
|
||||||
include_str!("sql/unset_template_for_drop_dbs.sql"),
|
|
||||||
datname_str = escape_literal(&op.name),
|
|
||||||
datname = &op.name.pg_quote()
|
|
||||||
);
|
|
||||||
|
|
||||||
// Use FORCE to drop database even if there are active connections.
|
|
||||||
// We run this from `cloud_admin`, so it should have enough privileges.
|
|
||||||
// NB: there could be other db states, which prevent us from dropping
|
|
||||||
// the database. For example, if db is used by any active subscription
|
|
||||||
// or replication slot.
|
|
||||||
// TODO: deal with it once we allow logical replication. Proper fix should
|
|
||||||
// involve returning an error code to the control plane, so it could
|
|
||||||
// figure out that this is a non-retryable error, return it to the user
|
|
||||||
// and fail operation permanently.
|
|
||||||
let drop_db_query: String = format!(
|
|
||||||
"DROP DATABASE IF EXISTS {} WITH (FORCE)",
|
|
||||||
&op.name.pg_quote()
|
|
||||||
);
|
|
||||||
|
|
||||||
databases.remove(&op.name);
|
|
||||||
|
|
||||||
Some(vec![
|
|
||||||
Operation {
|
|
||||||
query: unset_template_query,
|
|
||||||
comment: Some(format!(
|
|
||||||
"optionally clearing template flags for DB {}",
|
|
||||||
op.name,
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: drop_db_query,
|
|
||||||
comment: Some(format!("deleting database {}", op.name,)),
|
|
||||||
},
|
|
||||||
])
|
|
||||||
}
|
|
||||||
"rename_db" => {
|
|
||||||
if let Some(mut db) = databases.remove(&op.name) {
|
|
||||||
// update state of known databases
|
|
||||||
let new_name = op.new_name.as_ref().unwrap();
|
|
||||||
db.name = new_name.clone();
|
|
||||||
databases.insert(db.name.clone(), db);
|
|
||||||
|
|
||||||
Some(vec![Operation {
|
|
||||||
query: format!(
|
|
||||||
"ALTER DATABASE {} RENAME TO {}",
|
|
||||||
op.name.pg_quote(),
|
|
||||||
new_name.pg_quote(),
|
|
||||||
),
|
|
||||||
comment: Some(format!(
|
|
||||||
"renaming database '{}' to '{}'",
|
|
||||||
op.name, new_name
|
|
||||||
)),
|
|
||||||
}])
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => None,
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.flatten();
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
ApplySpecPhase::CreateAndAlterDatabases => {
|
|
||||||
let mut ctx = ctx.write().await;
|
|
||||||
|
|
||||||
let operations = spec
|
|
||||||
.cluster
|
|
||||||
.databases
|
|
||||||
.iter()
|
|
||||||
.filter_map(move |db| {
|
|
||||||
let databases = &mut ctx.dbs;
|
|
||||||
if let Some(edb) = databases.get_mut(&db.name) {
|
|
||||||
let change_owner = if edb.owner.starts_with('"') {
|
|
||||||
db.owner.pg_quote() != edb.owner
|
|
||||||
} else {
|
|
||||||
db.owner != edb.owner
|
|
||||||
};
|
|
||||||
|
|
||||||
edb.owner = db.owner.clone();
|
|
||||||
|
|
||||||
if change_owner {
|
|
||||||
Some(vec![Operation {
|
|
||||||
query: format!(
|
|
||||||
"ALTER DATABASE {} OWNER TO {}",
|
|
||||||
db.name.pg_quote(),
|
|
||||||
db.owner.pg_quote()
|
|
||||||
),
|
|
||||||
comment: Some(format!(
|
|
||||||
"changing database owner of database {} to {}",
|
|
||||||
db.name, db.owner
|
|
||||||
)),
|
|
||||||
}])
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
databases.insert(db.name.clone(), db.clone());
|
|
||||||
|
|
||||||
Some(vec![
|
|
||||||
Operation {
|
|
||||||
query: format!(
|
|
||||||
"CREATE DATABASE {} {}",
|
|
||||||
db.name.pg_quote(),
|
|
||||||
db.to_pg_options(),
|
|
||||||
),
|
|
||||||
comment: None,
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: format!(
|
|
||||||
"GRANT ALL PRIVILEGES ON DATABASE {} TO neon_superuser",
|
|
||||||
db.name.pg_quote()
|
|
||||||
),
|
|
||||||
comment: None,
|
|
||||||
},
|
|
||||||
])
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.flatten();
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
ApplySpecPhase::RunInEachDatabase { db, subphase } => {
|
|
||||||
match subphase {
|
|
||||||
PerDatabasePhase::DeleteDBRoleReferences => {
|
|
||||||
let ctx = ctx.read().await;
|
|
||||||
|
|
||||||
let operations =
|
|
||||||
spec.delta_operations
|
|
||||||
.iter()
|
|
||||||
.flatten()
|
|
||||||
.filter(|op| op.action == "delete_role")
|
|
||||||
.filter_map(move |op| {
|
|
||||||
if db.is_owned_by(&op.name) {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
if !ctx.roles.contains_key(&op.name) {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
let quoted = op.name.pg_quote();
|
|
||||||
let new_owner = match &db {
|
|
||||||
DB::SystemDB => PgIdent::from("cloud_admin").pg_quote(),
|
|
||||||
DB::UserDB(db) => db.owner.pg_quote(),
|
|
||||||
};
|
|
||||||
|
|
||||||
Some(vec![
|
|
||||||
// This will reassign all dependent objects to the db owner
|
|
||||||
Operation {
|
|
||||||
query: format!(
|
|
||||||
"REASSIGN OWNED BY {} TO {}",
|
|
||||||
quoted, new_owner,
|
|
||||||
),
|
|
||||||
comment: None,
|
|
||||||
},
|
|
||||||
// This now will only drop privileges of the role
|
|
||||||
Operation {
|
|
||||||
query: format!("DROP OWNED BY {}", quoted),
|
|
||||||
comment: None,
|
|
||||||
},
|
|
||||||
])
|
|
||||||
})
|
|
||||||
.flatten();
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
PerDatabasePhase::ChangeSchemaPerms => {
|
|
||||||
let ctx = ctx.read().await;
|
|
||||||
let databases = &ctx.dbs;
|
|
||||||
|
|
||||||
let db = match &db {
|
|
||||||
// ignore schema permissions on the system database
|
|
||||||
DB::SystemDB => return Ok(Box::new(empty())),
|
|
||||||
DB::UserDB(db) => db,
|
|
||||||
};
|
|
||||||
|
|
||||||
if databases.get(&db.name).is_none() {
|
|
||||||
bail!("database {} doesn't exist in PostgreSQL", db.name);
|
|
||||||
}
|
|
||||||
|
|
||||||
let edb = databases.get(&db.name).unwrap();
|
|
||||||
|
|
||||||
if edb.restrict_conn || edb.invalid {
|
|
||||||
return Ok(Box::new(empty()));
|
|
||||||
}
|
|
||||||
|
|
||||||
let operations = vec![
|
|
||||||
Operation {
|
|
||||||
query: format!(
|
|
||||||
include_str!("sql/set_public_schema_owner.sql"),
|
|
||||||
db_owner = db.owner.pg_quote()
|
|
||||||
),
|
|
||||||
comment: None,
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: String::from(include_str!("sql/default_grants.sql")),
|
|
||||||
comment: None,
|
|
||||||
},
|
|
||||||
]
|
|
||||||
.into_iter();
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
PerDatabasePhase::HandleAnonExtension => {
|
|
||||||
// Only install Anon into user databases
|
|
||||||
let db = match &db {
|
|
||||||
DB::SystemDB => return Ok(Box::new(empty())),
|
|
||||||
DB::UserDB(db) => db,
|
|
||||||
};
|
|
||||||
// Never install Anon when it's not enabled as feature
|
|
||||||
if !spec.features.contains(&ComputeFeature::AnonExtension) {
|
|
||||||
return Ok(Box::new(empty()));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only install Anon when it's added in preload libraries
|
|
||||||
let opt_libs = spec.cluster.settings.find("shared_preload_libraries");
|
|
||||||
|
|
||||||
let libs = match opt_libs {
|
|
||||||
Some(libs) => libs,
|
|
||||||
None => return Ok(Box::new(empty())),
|
|
||||||
};
|
|
||||||
|
|
||||||
if !libs.contains("anon") {
|
|
||||||
return Ok(Box::new(empty()));
|
|
||||||
}
|
|
||||||
|
|
||||||
let db_owner = db.owner.pg_quote();
|
|
||||||
|
|
||||||
let operations = vec![
|
|
||||||
// Create anon extension if this compute needs it
|
|
||||||
// Users cannot create it themselves, because superuser is required.
|
|
||||||
Operation {
|
|
||||||
query: String::from("CREATE EXTENSION IF NOT EXISTS anon CASCADE"),
|
|
||||||
comment: Some(String::from("creating anon extension")),
|
|
||||||
},
|
|
||||||
// Initialize anon extension
|
|
||||||
// This also requires superuser privileges, so users cannot do it themselves.
|
|
||||||
Operation {
|
|
||||||
query: String::from("SELECT anon.init()"),
|
|
||||||
comment: Some(String::from("initializing anon extension data")),
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: format!("GRANT ALL ON SCHEMA anon TO {}", db_owner),
|
|
||||||
comment: Some(String::from(
|
|
||||||
"granting anon extension schema permissions",
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: format!(
|
|
||||||
"GRANT ALL ON ALL FUNCTIONS IN SCHEMA anon TO {}",
|
|
||||||
db_owner
|
|
||||||
),
|
|
||||||
comment: Some(String::from(
|
|
||||||
"granting anon extension schema functions permissions",
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
// We need this, because some functions are defined as SECURITY DEFINER.
|
|
||||||
// In Postgres SECURITY DEFINER functions are executed with the privileges
|
|
||||||
// of the owner.
|
|
||||||
// In the anon extension this is needed to access some GUCs, which are only accessible to the
|
|
||||||
// superuser. But we've patched postgres to allow db_owner to access them as well.
|
|
||||||
// So we need to change owner of these functions to db_owner.
|
|
||||||
Operation {
|
|
||||||
query: format!(
|
|
||||||
include_str!("sql/anon_ext_fn_reassign.sql"),
|
|
||||||
db_owner = db_owner,
|
|
||||||
),
|
|
||||||
comment: Some(String::from(
|
|
||||||
"change anon extension functions owner to database_owner",
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: format!(
|
|
||||||
"GRANT ALL ON ALL TABLES IN SCHEMA anon TO {}",
|
|
||||||
db_owner,
|
|
||||||
),
|
|
||||||
comment: Some(String::from(
|
|
||||||
"granting anon extension tables permissions",
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: format!(
|
|
||||||
"GRANT ALL ON ALL SEQUENCES IN SCHEMA anon TO {}",
|
|
||||||
db_owner,
|
|
||||||
),
|
|
||||||
comment: Some(String::from(
|
|
||||||
"granting anon extension sequences permissions",
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
]
|
|
||||||
.into_iter();
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Interestingly, we only install p_s_s in the main database, even when
|
|
||||||
// it's preloaded.
|
|
||||||
ApplySpecPhase::HandleOtherExtensions => {
|
|
||||||
if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
|
|
||||||
if libs.contains("pg_stat_statements") {
|
|
||||||
return Ok(Box::new(once(Operation {
|
|
||||||
query: String::from("CREATE EXTENSION IF NOT EXISTS pg_stat_statements"),
|
|
||||||
comment: Some(String::from("create system extensions")),
|
|
||||||
})));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Box::new(empty()))
|
|
||||||
}
|
|
||||||
ApplySpecPhase::HandleNeonExtension => {
|
|
||||||
let operations = vec![
|
|
||||||
Operation {
|
|
||||||
query: String::from("CREATE SCHEMA IF NOT EXISTS neon"),
|
|
||||||
comment: Some(String::from("init: add schema for extension")),
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: String::from("CREATE EXTENSION IF NOT EXISTS neon WITH SCHEMA neon"),
|
|
||||||
comment: Some(String::from(
|
|
||||||
"init: install the extension if not already installed",
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: String::from(
|
|
||||||
"UPDATE pg_extension SET extrelocatable = true WHERE extname = 'neon'",
|
|
||||||
),
|
|
||||||
comment: Some(String::from("compat/fix: make neon relocatable")),
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: String::from("ALTER EXTENSION neon SET SCHEMA neon"),
|
|
||||||
comment: Some(String::from("compat/fix: alter neon extension schema")),
|
|
||||||
},
|
|
||||||
Operation {
|
|
||||||
query: String::from("ALTER EXTENSION neon UPDATE"),
|
|
||||||
comment: Some(String::from("compat/update: update neon extension version")),
|
|
||||||
},
|
|
||||||
]
|
|
||||||
.into_iter();
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
ApplySpecPhase::CreateAvailabilityCheck => Ok(Box::new(once(Operation {
|
|
||||||
query: String::from(include_str!("sql/add_availabilitycheck_tables.sql")),
|
|
||||||
comment: None,
|
|
||||||
}))),
|
|
||||||
ApplySpecPhase::DropRoles => {
|
|
||||||
let operations = spec
|
|
||||||
.delta_operations
|
|
||||||
.iter()
|
|
||||||
.flatten()
|
|
||||||
.filter(|op| op.action == "delete_role")
|
|
||||||
.map(|op| Operation {
|
|
||||||
query: format!("DROP ROLE IF EXISTS {}", op.name.pg_quote()),
|
|
||||||
comment: None,
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(Box::new(operations))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
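
// The SQL snippets below appear to be the `include_str!` templates referenced
// above (`sql/add_availabilitycheck_tables.sql`, `sql/anon_ext_fn_reassign.sql`,
// `sql/default_grants.sql`, `sql/set_public_schema_owner.sql`,
// `sql/unset_template_for_drop_dbs.sql`); placeholders such as `{db_owner}`,
// `{datname}` and `{datname_str}` are filled in by `format!` at the call sites
// shown earlier.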

@@ -1,18 +0,0 @@
DO $$
BEGIN
    IF NOT EXISTS(
        SELECT 1
        FROM pg_catalog.pg_tables
        WHERE tablename = 'health_check'
    )
    THEN
        CREATE TABLE health_check (
            id serial primary key,
            updated_at timestamptz default now()
        );
        INSERT INTO health_check VALUES (1, now())
            ON CONFLICT (id) DO UPDATE
            SET updated_at = now();
    END IF;
END
$$

@@ -1,12 +0,0 @@
DO $$
DECLARE
    query varchar;
BEGIN
    FOR query IN SELECT 'ALTER FUNCTION '||nsp.nspname||'.'||p.proname||'('||pg_get_function_identity_arguments(p.oid)||') OWNER TO {db_owner};'
        FROM pg_proc p
        JOIN pg_namespace nsp ON p.pronamespace = nsp.oid
        WHERE nsp.nspname = 'anon' LOOP
        EXECUTE query;
    END LOOP;
END
$$;

@@ -1,30 +0,0 @@
DO
$$
BEGIN
    IF EXISTS(
        SELECT nspname
        FROM pg_catalog.pg_namespace
        WHERE nspname = 'public'
    ) AND
        current_setting('server_version_num')::int / 10000 >= 15
    THEN
        IF EXISTS(
            SELECT rolname
            FROM pg_catalog.pg_roles
            WHERE rolname = 'web_access'
        )
        THEN
            GRANT CREATE ON SCHEMA public TO web_access;
        END IF;
    END IF;
    IF EXISTS(
        SELECT nspname
        FROM pg_catalog.pg_namespace
        WHERE nspname = 'public'
    )
    THEN
        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO neon_superuser WITH GRANT OPTION;
        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO neon_superuser WITH GRANT OPTION;
    END IF;
END
$$;

@@ -1,23 +0,0 @@
DO
$$
DECLARE
    schema_owner TEXT;
BEGIN
    IF EXISTS(
        SELECT nspname
        FROM pg_catalog.pg_namespace
        WHERE nspname = 'public'
    )
    THEN
        SELECT nspowner::regrole::text
            FROM pg_catalog.pg_namespace
            WHERE nspname = 'public'
            INTO schema_owner;

        IF schema_owner = 'cloud_admin' OR schema_owner = 'zenith_admin'
        THEN
            ALTER SCHEMA public OWNER TO {db_owner};
        END IF;
    END IF;
END
$$;

@@ -1,12 +0,0 @@
DO $$
BEGIN
    IF EXISTS(
        SELECT 1
        FROM pg_catalog.pg_database
        WHERE datname = {datname_str}
    )
    THEN
        ALTER DATABASE {datname} is_template false;
    END IF;
END
$$;
|
|
||||||
@@ -1153,7 +1153,6 @@ async fn handle_timeline(cmd: &TimelineCmd, env: &mut local_env::LocalEnv) -> Re
|
|||||||
timeline_info.timeline_id
|
timeline_info.timeline_id
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
// TODO: rename to import-basebackup-plus-wal
|
|
||||||
TimelineCmd::Import(args) => {
|
TimelineCmd::Import(args) => {
|
||||||
let tenant_id = get_tenant_id(args.tenant_id, env)?;
|
let tenant_id = get_tenant_id(args.tenant_id, env)?;
|
||||||
let timeline_id = args.timeline_id;
|
let timeline_id = args.timeline_id;
|
||||||
|
|||||||
@@ -53,7 +53,6 @@ use compute_api::spec::Role;
|
|||||||
use nix::sys::signal::kill;
|
use nix::sys::signal::kill;
|
||||||
use nix::sys::signal::Signal;
|
use nix::sys::signal::Signal;
|
||||||
use pageserver_api::shard::ShardStripeSize;
|
use pageserver_api::shard::ShardStripeSize;
|
||||||
use reqwest::header::CONTENT_TYPE;
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use url::Host;
|
use url::Host;
|
||||||
use utils::id::{NodeId, TenantId, TimelineId};
|
use utils::id::{NodeId, TenantId, TimelineId};
|
||||||
@@ -311,10 +310,6 @@ impl Endpoint {
|
|||||||
conf.append("wal_log_hints", "off");
|
conf.append("wal_log_hints", "off");
|
||||||
conf.append("max_replication_slots", "10");
|
conf.append("max_replication_slots", "10");
|
||||||
conf.append("hot_standby", "on");
|
conf.append("hot_standby", "on");
|
||||||
// Set to 1MB to both exercise getPage requests/LFC, and still have enough room for
|
|
||||||
// Postgres to operate. Everything smaller might be not enough for Postgres under load,
|
|
||||||
// and can cause errors like 'no unpinned buffers available', see
|
|
||||||
// <https://github.com/neondatabase/neon/issues/9956>
|
|
||||||
conf.append("shared_buffers", "1MB");
|
conf.append("shared_buffers", "1MB");
|
||||||
conf.append("fsync", "off");
|
conf.append("fsync", "off");
|
||||||
conf.append("max_connections", "100");
|
conf.append("max_connections", "100");
|
||||||
@@ -619,7 +614,6 @@ impl Endpoint {
|
|||||||
pgbouncer_settings: None,
|
pgbouncer_settings: None,
|
||||||
shard_stripe_size: Some(shard_stripe_size),
|
shard_stripe_size: Some(shard_stripe_size),
|
||||||
local_proxy_config: None,
|
local_proxy_config: None,
|
||||||
reconfigure_concurrency: 1,
|
|
||||||
};
|
};
|
||||||
let spec_path = self.endpoint_path().join("spec.json");
|
let spec_path = self.endpoint_path().join("spec.json");
|
||||||
std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?;
|
std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?;
|
||||||
@@ -819,7 +813,6 @@ impl Endpoint {
|
|||||||
self.http_address.ip(),
|
self.http_address.ip(),
|
||||||
self.http_address.port()
|
self.http_address.port()
|
||||||
))
|
))
|
||||||
.header(CONTENT_TYPE.as_str(), "application/json")
|
|
||||||
.body(format!(
|
.body(format!(
|
||||||
"{{\"spec\":{}}}",
|
"{{\"spec\":{}}}",
|
||||||
serde_json::to_string_pretty(&spec)?
|
serde_json::to_string_pretty(&spec)?
|
||||||
|
|||||||
@@ -415,11 +415,6 @@ impl PageServerNode {
|
|||||||
.map(|x| x.parse::<bool>())
|
.map(|x| x.parse::<bool>())
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'timeline_offloading' as bool")?,
|
.context("Failed to parse 'timeline_offloading' as bool")?,
|
||||||
wal_receiver_protocol_override: settings
|
|
||||||
.remove("wal_receiver_protocol_override")
|
|
||||||
.map(serde_json::from_str)
|
|
||||||
.transpose()
|
|
||||||
.context("parse `wal_receiver_protocol_override` from json")?,
|
|
||||||
};
|
};
|
||||||
if !settings.is_empty() {
|
if !settings.is_empty() {
|
||||||
bail!("Unrecognized tenant settings: {settings:?}")
|
bail!("Unrecognized tenant settings: {settings:?}")
|
||||||
|
|||||||
@@ -5,7 +5,6 @@
|
|||||||
//! ```text
|
//! ```text
|
||||||
//! .neon/safekeepers/<safekeeper id>
|
//! .neon/safekeepers/<safekeeper id>
|
||||||
//! ```
|
//! ```
|
||||||
use std::error::Error as _;
|
|
||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
@@ -27,7 +26,7 @@ use crate::{
|
|||||||
|
|
||||||
#[derive(Error, Debug)]
|
#[derive(Error, Debug)]
|
||||||
pub enum SafekeeperHttpError {
|
pub enum SafekeeperHttpError {
|
||||||
#[error("request error: {0}{}", .0.source().map(|e| format!(": {e}")).unwrap_or_default())]
|
#[error("Reqwest error: {0}")]
|
||||||
Transport(#[from] reqwest::Error),
|
Transport(#[from] reqwest::Error),
|
||||||
|
|
||||||
#[error("Error: {0}")]
|
#[error("Error: {0}")]
|
||||||
|
|||||||
@@ -560,26 +560,14 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
Command::TenantDescribe { tenant_id } => {
|
Command::TenantDescribe { tenant_id } => {
|
||||||
let TenantDescribeResponse {
|
let describe_response = storcon_client
|
||||||
tenant_id,
|
|
||||||
shards,
|
|
||||||
stripe_size,
|
|
||||||
policy,
|
|
||||||
config,
|
|
||||||
} = storcon_client
|
|
||||||
.dispatch::<(), TenantDescribeResponse>(
|
.dispatch::<(), TenantDescribeResponse>(
|
||||||
Method::GET,
|
Method::GET,
|
||||||
format!("control/v1/tenant/{tenant_id}"),
|
format!("control/v1/tenant/{tenant_id}"),
|
||||||
None,
|
None,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
println!("Tenant {tenant_id}");
|
let shards = describe_response.shards;
|
||||||
let mut table = comfy_table::Table::new();
|
|
||||||
table.add_row(["Policy", &format!("{:?}", policy)]);
|
|
||||||
table.add_row(["Stripe size", &format!("{:?}", stripe_size)]);
|
|
||||||
table.add_row(["Config", &serde_json::to_string_pretty(&config).unwrap()]);
|
|
||||||
println!("{table}");
|
|
||||||
println!("Shards:");
|
|
||||||
let mut table = comfy_table::Table::new();
|
let mut table = comfy_table::Table::new();
|
||||||
table.set_header(["Shard", "Attached", "Secondary", "Last error", "status"]);
|
table.set_header(["Shard", "Attached", "Secondary", "Last error", "status"]);
|
||||||
for shard in shards {
|
for shard in shards {
|
||||||
|
|||||||
@@ -33,6 +33,7 @@ reason = "the marvin attack only affects private key decryption, not public key
|
|||||||
[licenses]
|
[licenses]
|
||||||
allow = [
|
allow = [
|
||||||
"Apache-2.0",
|
"Apache-2.0",
|
||||||
|
"Artistic-2.0",
|
||||||
"BSD-2-Clause",
|
"BSD-2-Clause",
|
||||||
"BSD-3-Clause",
|
"BSD-3-Clause",
|
||||||
"CC0-1.0",
|
"CC0-1.0",
|
||||||
@@ -66,7 +67,7 @@ registries = []
|
|||||||
# More documentation about the 'bans' section can be found here:
|
# More documentation about the 'bans' section can be found here:
|
||||||
# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html
|
# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html
|
||||||
[bans]
|
[bans]
|
||||||
multiple-versions = "allow"
|
multiple-versions = "warn"
|
||||||
wildcards = "allow"
|
wildcards = "allow"
|
||||||
highlight = "all"
|
highlight = "all"
|
||||||
workspace-default-features = "allow"
|
workspace-default-features = "allow"
|
||||||
|
|||||||
@@ -4,16 +4,14 @@ ARG TAG=latest
|
|||||||
|
|
||||||
FROM $REPOSITORY/${COMPUTE_IMAGE}:$TAG
|
FROM $REPOSITORY/${COMPUTE_IMAGE}:$TAG
|
||||||
|
|
||||||
ARG COMPUTE_IMAGE
|
|
||||||
|
|
||||||
USER root
|
USER root
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y curl \
|
apt-get install -y curl \
|
||||||
jq \
|
jq \
|
||||||
python3-pip \
|
python3-pip \
|
||||||
netcat-openbsd
|
netcat
|
||||||
#Faker is required for the pg_anon test
|
#Faker is required for the pg_anon test
|
||||||
RUN case $COMPUTE_IMAGE in compute-node-v17) OPT="--break-system-packages";; *) OPT= ;; esac && pip3 install $OPT Faker
|
RUN pip3 install Faker
|
||||||
#This is required for the pg_hintplan test
|
#This is required for the pg_hintplan test
|
||||||
RUN mkdir -p /ext-src/pg_hint_plan-src && chown postgres /ext-src/pg_hint_plan-src
|
RUN mkdir -p /ext-src/pg_hint_plan-src && chown postgres /ext-src/pg_hint_plan-src
|
||||||
|
|
||||||
|
|||||||
@@ -30,17 +30,10 @@ cleanup() {
|
|||||||
docker compose --profile test-extensions -f $COMPOSE_FILE down
|
docker compose --profile test-extensions -f $COMPOSE_FILE down
|
||||||
}
|
}
|
||||||
|
|
||||||
for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
|
for pg_version in 14 15 16; do
|
||||||
pg_version=${pg_version/v/}
|
|
||||||
echo "clean up containers if exists"
|
echo "clean up containers if exists"
|
||||||
cleanup
|
cleanup
|
||||||
PG_TEST_VERSION=$((pg_version < 16 ? 16 : pg_version))
|
PG_TEST_VERSION=$(($pg_version < 16 ? 16 : $pg_version))
|
||||||
# The support of pg_anon not yet added to PG17, so we have to remove the corresponding option
|
|
||||||
if [ $pg_version -eq 17 ]; then
|
|
||||||
SPEC_PATH="compute_wrapper/var/db/postgres/specs"
|
|
||||||
mv $SPEC_PATH/spec.json $SPEC_PATH/spec.bak
|
|
||||||
jq 'del(.cluster.settings[] | select (.name == "session_preload_libraries"))' $SPEC_PATH/spec.bak > $SPEC_PATH/spec.json
|
|
||||||
fi
|
|
||||||
PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d
|
PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d
|
||||||
|
|
||||||
echo "wait until the compute is ready. timeout after 60s. "
|
echo "wait until the compute is ready. timeout after 60s. "
|
||||||
@@ -61,7 +54,8 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
|
|||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
if [ $pg_version -ge 16 ]; then
|
if [ $pg_version -ge 16 ]
|
||||||
|
then
|
||||||
echo Enabling trust connection
|
echo Enabling trust connection
|
||||||
docker exec $COMPUTE_CONTAINER_NAME bash -c "sed -i '\$d' /var/db/postgres/compute/pg_hba.conf && echo -e 'host\t all\t all\t all\t trust' >> /var/db/postgres/compute/pg_hba.conf && psql $PSQL_OPTION -c 'select pg_reload_conf()' "
|
docker exec $COMPUTE_CONTAINER_NAME bash -c "sed -i '\$d' /var/db/postgres/compute/pg_hba.conf && echo -e 'host\t all\t all\t all\t trust' >> /var/db/postgres/compute/pg_hba.conf && psql $PSQL_OPTION -c 'select pg_reload_conf()' "
|
||||||
echo Adding postgres role
|
echo Adding postgres role
|
||||||
@@ -74,13 +68,10 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
|
|||||||
# The test assumes that it is running on the same host with the postgres engine.
|
# The test assumes that it is running on the same host with the postgres engine.
|
||||||
# In our case it's not true, that's why we are copying files to the compute node
|
# In our case it's not true, that's why we are copying files to the compute node
|
||||||
TMPDIR=$(mktemp -d)
|
TMPDIR=$(mktemp -d)
|
||||||
# Add support for pg_anon for pg_v16
|
docker cp $TEST_CONTAINER_NAME:/ext-src/pg_anon-src/data $TMPDIR/data
|
||||||
if [ $pg_version -ne 17 ]; then
|
echo -e '1\t too \t many \t tabs' > $TMPDIR/data/bad.csv
|
||||||
docker cp $TEST_CONTAINER_NAME:/ext-src/pg_anon-src/data $TMPDIR/data
|
docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/tmp/tmp_anon_alternate_data
|
||||||
echo -e '1\t too \t many \t tabs' > $TMPDIR/data/bad.csv
|
|
||||||
docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/tmp/tmp_anon_alternate_data
|
|
||||||
rm -rf $TMPDIR
|
rm -rf $TMPDIR
|
||||||
fi
|
|
||||||
TMPDIR=$(mktemp -d)
|
TMPDIR=$(mktemp -d)
|
||||||
# The following block does the same for the pg_hintplan test
|
# The following block does the same for the pg_hintplan test
|
||||||
docker cp $TEST_CONTAINER_NAME:/ext-src/pg_hint_plan-src/data $TMPDIR/data
|
docker cp $TEST_CONTAINER_NAME:/ext-src/pg_hint_plan-src/data $TMPDIR/data
|
||||||
@@ -106,8 +97,4 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
cleanup
|
cleanup
|
||||||
# The support of pg_anon not yet added to PG17, so we have to remove the corresponding option
|
|
||||||
if [ $pg_version -eq 17 ]; then
|
|
||||||
mv $SPEC_PATH/spec.bak $SPEC_PATH/spec.json
|
|
||||||
fi
|
|
||||||
done
|
done
|
||||||
|
|||||||
@@ -113,21 +113,21 @@ so manual installation of dependencies is not recommended.
|
|||||||
A single virtual environment with all dependencies is described in the single `Pipfile`.
|
A single virtual environment with all dependencies is described in the single `Pipfile`.
|
||||||
|
|
||||||
### Prerequisites
|
### Prerequisites
|
||||||
- Install Python 3.11 (the minimal supported version) or greater.
|
- Install Python 3.9 (the minimal supported version) or greater.
|
||||||
- Our setup with poetry should work with newer python versions too. So feel free to open an issue with a `c/test-runner` label if something doesn't work as expected.
|
- Our setup with poetry should work with newer python versions too. So feel free to open an issue with a `c/test-runner` label if something doesn't work as expected.
|
||||||
- If you have some trouble with other version you can resolve it by installing Python 3.11 separately, via [pyenv](https://github.com/pyenv/pyenv) or via system package manager e.g.:
|
- If you have some trouble with other version you can resolve it by installing Python 3.9 separately, via [pyenv](https://github.com/pyenv/pyenv) or via system package manager e.g.:
|
||||||
```bash
|
```bash
|
||||||
# In Ubuntu
|
# In Ubuntu
|
||||||
sudo add-apt-repository ppa:deadsnakes/ppa
|
sudo add-apt-repository ppa:deadsnakes/ppa
|
||||||
sudo apt update
|
sudo apt update
|
||||||
sudo apt install python3.11
|
sudo apt install python3.9
|
||||||
```
|
```
|
||||||
- Install `poetry`
|
- Install `poetry`
|
||||||
- Exact version of `poetry` is not important, see installation instructions available at poetry's [website](https://python-poetry.org/docs/#installation).
|
- Exact version of `poetry` is not important, see installation instructions available at poetry's [website](https://python-poetry.org/docs/#installation).
|
||||||
- Install dependencies via `./scripts/pysync`.
|
- Install dependencies via `./scripts/pysync`.
|
||||||
- Note that CI uses specific Python version (look for `PYTHON_VERSION` [here](https://github.com/neondatabase/docker-images/blob/main/rust/Dockerfile))
|
- Note that CI uses specific Python version (look for `PYTHON_VERSION` [here](https://github.com/neondatabase/docker-images/blob/main/rust/Dockerfile))
|
||||||
so if you have a different version, some linting tools can yield different results locally vs in CI.
|
so if you have a different version, some linting tools can yield different results locally vs in CI.
|
||||||
- You can explicitly specify which Python to use by running `poetry env use /path/to/python`, e.g. `poetry env use python3.11`.
|
- You can explicitly specify which Python to use by running `poetry env use /path/to/python`, e.g. `poetry env use python3.9`.
|
||||||
This may also disable the `The currently activated Python version X.Y.Z is not supported by the project` warning.
|
This may also disable the `The currently activated Python version X.Y.Z is not supported by the project` warning.
|
||||||
|
|
||||||
Run `poetry shell` to activate the virtual environment.
|
Run `poetry shell` to activate the virtual environment.
|
||||||
|
|||||||
@@ -19,10 +19,6 @@ pub type PgIdent = String;
|
|||||||
/// String type alias representing Postgres extension version
|
/// String type alias representing Postgres extension version
|
||||||
pub type ExtVersion = String;
|
pub type ExtVersion = String;
|
||||||
|
|
||||||
fn default_reconfigure_concurrency() -> usize {
|
|
||||||
1
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Cluster spec or configuration represented as an optional number of
|
/// Cluster spec or configuration represented as an optional number of
|
||||||
/// delta operations + final cluster state description.
|
/// delta operations + final cluster state description.
|
||||||
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
|
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
|
||||||
@@ -71,7 +67,7 @@ pub struct ComputeSpec {
|
|||||||
pub cluster: Cluster,
|
pub cluster: Cluster,
|
||||||
pub delta_operations: Option<Vec<DeltaOp>>,
|
pub delta_operations: Option<Vec<DeltaOp>>,
|
||||||
|
|
||||||
/// An optional hint that can be passed to speed up startup time if we know
|
/// An optional hint that can be passed to speed up startup time if we know
|
||||||
/// that no pg catalog mutations (like role creation, database creation,
|
/// that no pg catalog mutations (like role creation, database creation,
|
||||||
/// extension creation) need to be done on the actual database to start.
|
/// extension creation) need to be done on the actual database to start.
|
||||||
#[serde(default)] // Default false
|
#[serde(default)] // Default false
|
||||||
@@ -90,7 +86,9 @@ pub struct ComputeSpec {
|
|||||||
// etc. GUCs in cluster.settings. TODO: Once the control plane has been
|
// etc. GUCs in cluster.settings. TODO: Once the control plane has been
|
||||||
// updated to fill these fields, we can make these non optional.
|
// updated to fill these fields, we can make these non optional.
|
||||||
pub tenant_id: Option<TenantId>,
|
pub tenant_id: Option<TenantId>,
|
||||||
|
|
||||||
pub timeline_id: Option<TimelineId>,
|
pub timeline_id: Option<TimelineId>,
|
||||||
|
|
||||||
pub pageserver_connstring: Option<String>,
|
pub pageserver_connstring: Option<String>,
|
||||||
|
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
@@ -115,20 +113,6 @@ pub struct ComputeSpec {
|
|||||||
/// Local Proxy configuration used for JWT authentication
|
/// Local Proxy configuration used for JWT authentication
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub local_proxy_config: Option<LocalProxySpec>,
|
pub local_proxy_config: Option<LocalProxySpec>,
|
||||||
|
|
||||||
/// Number of concurrent connections during the parallel RunInEachDatabase
|
|
||||||
/// phase of the apply config process.
|
|
||||||
///
|
|
||||||
/// We need a higher concurrency during reconfiguration in case of many DBs,
|
|
||||||
/// but the instance is already running and used by clients. We can easily exceed the
|
|
||||||
/// `max_connections` limit, and the current code won't handle that.
|
|
||||||
///
|
|
||||||
/// Default is 1, but also allow control plane to override this value for specific
|
|
||||||
/// projects. It's also recommended to bump `superuser_reserved_connections` +=
|
|
||||||
/// `reconfigure_concurrency` for such projects to ensure that we always have
|
|
||||||
/// enough spare connections for reconfiguration process to succeed.
|
|
||||||
#[serde(default = "default_reconfigure_concurrency")]
|
|
||||||
pub reconfigure_concurrency: usize,
|
|
||||||
}
|
}
|
||||||
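The `default_reconfigure_concurrency` helper and the `#[serde(default = "...")]` attribute above determine what happens when the control plane omits the field. A minimal sketch of that behaviour, assuming `serde` (with derive) and `serde_json` as dependencies; `SpecSketch` is a made-up stand-in for the real `ComputeSpec`:

```rust
// Minimal sketch (not the real ComputeSpec) of the serde default shown above:
// when `reconfigure_concurrency` is omitted, serde calls the default function.
use serde::Deserialize;

fn default_reconfigure_concurrency() -> usize {
    1
}

#[derive(Deserialize)]
struct SpecSketch {
    #[serde(default = "default_reconfigure_concurrency")]
    reconfigure_concurrency: usize,
}

fn main() {
    // Field omitted: the default function supplies 1.
    let spec: SpecSketch = serde_json::from_str("{}").unwrap();
    assert_eq!(spec.reconfigure_concurrency, 1);

    // The control plane may override it per project.
    let spec: SpecSketch = serde_json::from_str(r#"{"reconfigure_concurrency": 4}"#).unwrap();
    assert_eq!(spec.reconfigure_concurrency, 4);
}
```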
|
|
||||||
/// Feature flag to signal `compute_ctl` to enable certain experimental functionality.
|
/// Feature flag to signal `compute_ctl` to enable certain experimental functionality.
|
||||||
@@ -331,9 +315,6 @@ mod tests {
|
|||||||
|
|
||||||
// Features list defaults to empty vector.
|
// Features list defaults to empty vector.
|
||||||
assert!(spec.features.is_empty());
|
assert!(spec.features.is_empty());
|
||||||
|
|
||||||
// Reconfigure concurrency defaults to 1.
|
|
||||||
assert_eq!(spec.reconfigure_concurrency, 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
@@ -103,12 +103,11 @@ impl<'a> IdempotencyKey<'a> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Split into chunks of 1000 metrics to avoid exceeding the max request size.
|
|
||||||
pub const CHUNK_SIZE: usize = 1000;
|
pub const CHUNK_SIZE: usize = 1000;
|
||||||
|
|
||||||
// Just a wrapper around a slice of events
|
// Just a wrapper around a slice of events
|
||||||
// to serialize it as `{"events" : [ ] }`
|
// to serialize it as `{"events" : [ ] }`
|
||||||
#[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq)]
|
#[derive(serde::Serialize, Deserialize)]
|
||||||
pub struct EventChunk<'a, T: Clone + PartialEq> {
|
pub struct EventChunk<'a, T: Clone> {
|
||||||
pub events: std::borrow::Cow<'a, [T]>,
|
pub events: std::borrow::Cow<'a, [T]>,
|
||||||
}
|
}
|
||||||
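As the comment above notes, consumption events are sent in chunks of `CHUNK_SIZE` and each chunk is serialized under an `"events"` key. A hedged sketch of that batching, assuming `serde`/`serde_json`; the `Event` payload type and its fields are invented for illustration:

```rust
// Sketch of the batching described above: events are split into CHUNK_SIZE-sized
// chunks, each serialized as {"events": [...]}, so no single request gets too large.
use std::borrow::Cow;

use serde::Serialize;

pub const CHUNK_SIZE: usize = 1000;

#[derive(Serialize, Clone)]
struct Event {
    metric: String,
    value: u64,
}

// Mirrors the EventChunk wrapper above: a borrowed slice serialized under "events".
#[derive(Serialize)]
struct EventChunk<'a, T: Clone> {
    events: Cow<'a, [T]>,
}

fn main() {
    let events: Vec<Event> = (0..2500u64)
        .map(|value| Event { metric: "written_size".to_string(), value })
        .collect();

    // Each chunk becomes one request body.
    for chunk in events.chunks(CHUNK_SIZE) {
        let body = EventChunk { events: Cow::Borrowed(chunk) };
        let json = serde_json::to_string(&body).unwrap();
        assert!(json.starts_with(r#"{"events":["#));
    }
}
```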
|
|||||||
@@ -2,28 +2,14 @@
|
|||||||
|
|
||||||
// This module has heavy inspiration from the prometheus crate's `process_collector.rs`.
|
// This module has heavy inspiration from the prometheus crate's `process_collector.rs`.
|
||||||
|
|
||||||
use once_cell::sync::Lazy;
|
|
||||||
use prometheus::Gauge;
|
|
||||||
|
|
||||||
use crate::UIntGauge;
|
use crate::UIntGauge;
|
||||||
|
|
||||||
pub struct Collector {
|
pub struct Collector {
|
||||||
descs: Vec<prometheus::core::Desc>,
|
descs: Vec<prometheus::core::Desc>,
|
||||||
vmlck: crate::UIntGauge,
|
vmlck: crate::UIntGauge,
|
||||||
cpu_seconds_highres: Gauge,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const NMETRICS: usize = 2;
|
const NMETRICS: usize = 1;
|
||||||
|
|
||||||
static CLK_TCK_F64: Lazy<f64> = Lazy::new(|| {
|
|
||||||
let long = unsafe { libc::sysconf(libc::_SC_CLK_TCK) };
|
|
||||||
if long == -1 {
|
|
||||||
panic!("sysconf(_SC_CLK_TCK) failed");
|
|
||||||
}
|
|
||||||
let convertible_to_f64: i32 =
|
|
||||||
i32::try_from(long).expect("sysconf(_SC_CLK_TCK) is larger than i32");
|
|
||||||
convertible_to_f64 as f64
|
|
||||||
});
|
|
||||||
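The `cpu_seconds_highres` gauge in this hunk converts `utime + stime` (reported in clock ticks) into seconds by dividing by `_SC_CLK_TCK`. A minimal sketch of that conversion, assuming the `libc` crate; the tick counts are made-up values standing in for a real `/proc/self/stat` read:

```rust
// Tick-to-seconds conversion used by the high-resolution CPU gauge above.
fn main() {
    // Ticks per second; the collector above caches this in CLK_TCK_F64.
    let clk_tck = unsafe { libc::sysconf(libc::_SC_CLK_TCK) } as f64;

    let (utime_ticks, stime_ticks) = (1234u64, 567u64); // placeholder procfs values
    let cpu_seconds = (utime_ticks + stime_ticks) as f64 / clk_tck;
    println!("process CPU time: {cpu_seconds:.2}s");
}
```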
|
|
||||||
impl prometheus::core::Collector for Collector {
|
impl prometheus::core::Collector for Collector {
|
||||||
fn desc(&self) -> Vec<&prometheus::core::Desc> {
|
fn desc(&self) -> Vec<&prometheus::core::Desc> {
|
||||||
@@ -41,12 +27,6 @@ impl prometheus::core::Collector for Collector {
|
|||||||
mfs.extend(self.vmlck.collect())
|
mfs.extend(self.vmlck.collect())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if let Ok(stat) = myself.stat() {
|
|
||||||
let cpu_seconds = stat.utime + stat.stime;
|
|
||||||
self.cpu_seconds_highres
|
|
||||||
.set(cpu_seconds as f64 / *CLK_TCK_F64);
|
|
||||||
mfs.extend(self.cpu_seconds_highres.collect());
|
|
||||||
}
|
|
||||||
mfs
|
mfs
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -63,23 +43,7 @@ impl Collector {
|
|||||||
.cloned(),
|
.cloned(),
|
||||||
);
|
);
|
||||||
|
|
||||||
let cpu_seconds_highres = Gauge::new(
|
Self { descs, vmlck }
|
||||||
"libmetrics_process_cpu_seconds_highres",
|
|
||||||
"Total user and system CPU time spent in seconds.\
|
|
||||||
Sub-second resolution, hence better than `process_cpu_seconds_total`.",
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
descs.extend(
|
|
||||||
prometheus::core::Collector::desc(&cpu_seconds_highres)
|
|
||||||
.into_iter()
|
|
||||||
.cloned(),
|
|
||||||
);
|
|
||||||
|
|
||||||
Self {
|
|
||||||
descs,
|
|
||||||
vmlck,
|
|
||||||
cpu_seconds_highres,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -33,7 +33,6 @@ remote_storage.workspace = true
|
|||||||
postgres_backend.workspace = true
|
postgres_backend.workspace = true
|
||||||
nix = {workspace = true, optional = true}
|
nix = {workspace = true, optional = true}
|
||||||
reqwest.workspace = true
|
reqwest.workspace = true
|
||||||
rand.workspace = true
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
bincode.workspace = true
|
bincode.workspace = true
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ use std::{
|
|||||||
str::FromStr,
|
str::FromStr,
|
||||||
time::Duration,
|
time::Duration,
|
||||||
};
|
};
|
||||||
use utils::{logging::LogFormat, postgres_client::PostgresClientProtocol};
|
use utils::logging::LogFormat;
|
||||||
|
|
||||||
use crate::models::ImageCompressionAlgorithm;
|
use crate::models::ImageCompressionAlgorithm;
|
||||||
use crate::models::LsnLease;
|
use crate::models::LsnLease;
|
||||||
@@ -97,15 +97,6 @@ pub struct ConfigToml {
|
|||||||
pub control_plane_api: Option<reqwest::Url>,
|
pub control_plane_api: Option<reqwest::Url>,
|
||||||
pub control_plane_api_token: Option<String>,
|
pub control_plane_api_token: Option<String>,
|
||||||
pub control_plane_emergency_mode: bool,
|
pub control_plane_emergency_mode: bool,
|
||||||
/// Unstable feature: subject to change or removal without notice.
|
|
||||||
/// See <https://github.com/neondatabase/neon/pull/9218>.
|
|
||||||
pub import_pgdata_upcall_api: Option<reqwest::Url>,
|
|
||||||
/// Unstable feature: subject to change or removal without notice.
|
|
||||||
/// See <https://github.com/neondatabase/neon/pull/9218>.
|
|
||||||
pub import_pgdata_upcall_api_token: Option<String>,
|
|
||||||
/// Unstable feature: subject to change or removal without notice.
|
|
||||||
/// See <https://github.com/neondatabase/neon/pull/9218>.
|
|
||||||
pub import_pgdata_aws_endpoint_url: Option<reqwest::Url>,
|
|
||||||
pub heatmap_upload_concurrency: usize,
|
pub heatmap_upload_concurrency: usize,
|
||||||
pub secondary_download_concurrency: usize,
|
pub secondary_download_concurrency: usize,
|
||||||
pub virtual_file_io_engine: Option<crate::models::virtual_file::IoEngineKind>,
|
pub virtual_file_io_engine: Option<crate::models::virtual_file::IoEngineKind>,
|
||||||
@@ -118,8 +109,6 @@ pub struct ConfigToml {
|
|||||||
pub virtual_file_io_mode: Option<crate::models::virtual_file::IoMode>,
|
pub virtual_file_io_mode: Option<crate::models::virtual_file::IoMode>,
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub no_sync: Option<bool>,
|
pub no_sync: Option<bool>,
|
||||||
pub wal_receiver_protocol: PostgresClientProtocol,
|
|
||||||
pub page_service_pipelining: PageServicePipeliningConfig,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
||||||
@@ -136,28 +125,6 @@ pub struct DiskUsageEvictionTaskConfig {
|
|||||||
pub eviction_order: EvictionOrder,
|
pub eviction_order: EvictionOrder,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
|
||||||
#[serde(tag = "mode", rename_all = "kebab-case")]
|
|
||||||
#[serde(deny_unknown_fields)]
|
|
||||||
pub enum PageServicePipeliningConfig {
|
|
||||||
Serial,
|
|
||||||
Pipelined(PageServicePipeliningConfigPipelined),
|
|
||||||
}
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
|
||||||
#[serde(deny_unknown_fields)]
|
|
||||||
pub struct PageServicePipeliningConfigPipelined {
|
|
||||||
/// Causes runtime errors if larger than max get_vectored batch size.
|
|
||||||
pub max_batch_size: NonZeroUsize,
|
|
||||||
pub execution: PageServiceProtocolPipelinedExecutionStrategy,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub enum PageServiceProtocolPipelinedExecutionStrategy {
|
|
||||||
ConcurrentFutures,
|
|
||||||
Tasks,
|
|
||||||
}
|
|
||||||
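The `PageServicePipeliningConfig` types above are plain serde enums: the `mode` tag selects the variant, and variant names are kebab-cased. A small self-contained sketch of how such a value parses, assuming `serde`/`serde_json`; the type names here mirror, but are not, the real `pageserver_api` types:

```rust
// Sketch of the tagged-enum config shown above; field names stay snake_case,
// variant names are kebab-cased, and "mode" picks the variant.
use std::num::NonZeroUsize;

use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
#[serde(tag = "mode", rename_all = "kebab-case")]
enum Pipelining {
    Serial,
    Pipelined(PipeliningPipelined),
}

#[derive(Debug, Deserialize, PartialEq)]
struct PipeliningPipelined {
    max_batch_size: NonZeroUsize,
    execution: Execution,
}

#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
enum Execution {
    ConcurrentFutures,
    Tasks,
}

fn main() {
    let cfg: Pipelining = serde_json::from_str(
        r#"{ "mode": "pipelined", "max_batch_size": 32, "execution": "concurrent-futures" }"#,
    )
    .unwrap();
    assert_eq!(
        cfg,
        Pipelining::Pipelined(PipeliningPipelined {
            max_batch_size: NonZeroUsize::new(32).unwrap(),
            execution: Execution::ConcurrentFutures,
        })
    );
}
```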
|
|
||||||
pub mod statvfs {
|
pub mod statvfs {
|
||||||
pub mod mock {
|
pub mod mock {
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
||||||
@@ -299,8 +266,6 @@ pub struct TenantConfigToml {
|
|||||||
/// Enable auto-offloading of timelines.
|
/// Enable auto-offloading of timelines.
|
||||||
/// (either this flag or the pageserver-global one need to be set)
|
/// (either this flag or the pageserver-global one need to be set)
|
||||||
pub timeline_offloading: bool,
|
pub timeline_offloading: bool,
|
||||||
|
|
||||||
pub wal_receiver_protocol_override: Option<PostgresClientProtocol>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub mod defaults {
|
pub mod defaults {
|
||||||
@@ -352,9 +317,6 @@ pub mod defaults {
|
|||||||
pub const DEFAULT_EPHEMERAL_BYTES_PER_MEMORY_KB: usize = 0;
|
pub const DEFAULT_EPHEMERAL_BYTES_PER_MEMORY_KB: usize = 0;
|
||||||
|
|
||||||
pub const DEFAULT_IO_BUFFER_ALIGNMENT: usize = 512;
|
pub const DEFAULT_IO_BUFFER_ALIGNMENT: usize = 512;
|
||||||
|
|
||||||
pub const DEFAULT_WAL_RECEIVER_PROTOCOL: utils::postgres_client::PostgresClientProtocol =
|
|
||||||
utils::postgres_client::PostgresClientProtocol::Vanilla;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for ConfigToml {
|
impl Default for ConfigToml {
|
||||||
@@ -420,10 +382,6 @@ impl Default for ConfigToml {
|
|||||||
control_plane_api_token: (None),
|
control_plane_api_token: (None),
|
||||||
control_plane_emergency_mode: (false),
|
control_plane_emergency_mode: (false),
|
||||||
|
|
||||||
import_pgdata_upcall_api: (None),
|
|
||||||
import_pgdata_upcall_api_token: (None),
|
|
||||||
import_pgdata_aws_endpoint_url: (None),
|
|
||||||
|
|
||||||
heatmap_upload_concurrency: (DEFAULT_HEATMAP_UPLOAD_CONCURRENCY),
|
heatmap_upload_concurrency: (DEFAULT_HEATMAP_UPLOAD_CONCURRENCY),
|
||||||
secondary_download_concurrency: (DEFAULT_SECONDARY_DOWNLOAD_CONCURRENCY),
|
secondary_download_concurrency: (DEFAULT_SECONDARY_DOWNLOAD_CONCURRENCY),
|
||||||
|
|
||||||
@@ -441,15 +399,6 @@ impl Default for ConfigToml {
|
|||||||
virtual_file_io_mode: None,
|
virtual_file_io_mode: None,
|
||||||
tenant_config: TenantConfigToml::default(),
|
tenant_config: TenantConfigToml::default(),
|
||||||
no_sync: None,
|
no_sync: None,
|
||||||
wal_receiver_protocol: DEFAULT_WAL_RECEIVER_PROTOCOL,
|
|
||||||
page_service_pipelining: if !cfg!(test) {
|
|
||||||
PageServicePipeliningConfig::Serial
|
|
||||||
} else {
|
|
||||||
PageServicePipeliningConfig::Pipelined(PageServicePipeliningConfigPipelined {
|
|
||||||
max_batch_size: NonZeroUsize::new(32).unwrap(),
|
|
||||||
execution: PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -537,7 +486,6 @@ impl Default for TenantConfigToml {
|
|||||||
lsn_lease_length: LsnLease::DEFAULT_LENGTH,
|
lsn_lease_length: LsnLease::DEFAULT_LENGTH,
|
||||||
lsn_lease_length_for_ts: LsnLease::DEFAULT_LENGTH_FOR_TS,
|
lsn_lease_length_for_ts: LsnLease::DEFAULT_LENGTH_FOR_TS,
|
||||||
timeline_offloading: false,
|
timeline_offloading: false,
|
||||||
wal_receiver_protocol_override: None,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -48,7 +48,7 @@ pub struct TenantCreateResponse {
|
|||||||
pub shards: Vec<TenantCreateResponseShard>,
|
pub shards: Vec<TenantCreateResponseShard>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
#[derive(Serialize, Deserialize)]
|
||||||
pub struct NodeRegisterRequest {
|
pub struct NodeRegisterRequest {
|
||||||
pub node_id: NodeId,
|
pub node_id: NodeId,
|
||||||
|
|
||||||
@@ -75,7 +75,7 @@ pub struct TenantPolicyRequest {
|
|||||||
pub scheduling: Option<ShardSchedulingPolicy>,
|
pub scheduling: Option<ShardSchedulingPolicy>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
|
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
||||||
pub struct AvailabilityZone(pub String);
|
pub struct AvailabilityZone(pub String);
|
||||||
|
|
||||||
impl Display for AvailabilityZone {
|
impl Display for AvailabilityZone {
|
||||||
|
|||||||
@@ -229,18 +229,6 @@ impl Key {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CompactKey {
|
|
||||||
pub fn raw(&self) -> i128 {
|
|
||||||
self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<i128> for CompactKey {
|
|
||||||
fn from(value: i128) -> Self {
|
|
||||||
Self(value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for Key {
|
impl fmt::Display for Key {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
write!(
|
write!(
|
||||||
@@ -770,11 +758,6 @@ impl Key {
|
|||||||
&& self.field6 == 1
|
&& self.field6 == 1
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn is_aux_file_key(&self) -> bool {
|
|
||||||
self.field1 == AUX_KEY_PREFIX
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Guaranteed to return `Ok()` if [`Self::is_rel_block_key`] returns `true` for `key`.
|
/// Guaranteed to return `Ok()` if [`Self::is_rel_block_key`] returns `true` for `key`.
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn to_rel_block(self) -> anyhow::Result<(RelTag, BlockNumber)> {
|
pub fn to_rel_block(self) -> anyhow::Result<(RelTag, BlockNumber)> {
|
||||||
|
|||||||
@@ -48,7 +48,7 @@ pub struct ShardedRange<'a> {
|
|||||||
|
|
||||||
// Calculate the size of a range within the blocks of the same relation, or spanning only the
|
// Calculate the size of a range within the blocks of the same relation, or spanning only the
|
||||||
// top page in the previous relation's space.
|
// top page in the previous relation's space.
|
||||||
pub fn contiguous_range_len(range: &Range<Key>) -> u32 {
|
fn contiguous_range_len(range: &Range<Key>) -> u32 {
|
||||||
debug_assert!(is_contiguous_range(range));
|
debug_assert!(is_contiguous_range(range));
|
||||||
if range.start.field6 == 0xffffffff {
|
if range.start.field6 == 0xffffffff {
|
||||||
range.end.field6 + 1
|
range.end.field6 + 1
|
||||||
@@ -67,7 +67,7 @@ pub fn contiguous_range_len(range: &Range<Key>) -> u32 {
|
|||||||
/// This matters, because:
|
/// This matters, because:
|
||||||
/// - Within such ranges, keys are used contiguously. Outside such ranges it is sparse.
|
/// - Within such ranges, keys are used contiguously. Outside such ranges it is sparse.
|
||||||
/// - Within such ranges, we may calculate distances using simple subtraction of field6.
|
/// - Within such ranges, we may calculate distances using simple subtraction of field6.
|
||||||
pub fn is_contiguous_range(range: &Range<Key>) -> bool {
|
fn is_contiguous_range(range: &Range<Key>) -> bool {
|
||||||
range.start.field1 == range.end.field1
|
range.start.field1 == range.end.field1
|
||||||
&& range.start.field2 == range.end.field2
|
&& range.start.field2 == range.end.field2
|
||||||
&& range.start.field3 == range.end.field3
|
&& range.start.field3 == range.end.field3
|
||||||
|
|||||||
@@ -2,8 +2,6 @@ pub mod detach_ancestor;
|
|||||||
pub mod partitioning;
|
pub mod partitioning;
|
||||||
pub mod utilization;
|
pub mod utilization;
|
||||||
|
|
||||||
#[cfg(feature = "testing")]
|
|
||||||
use camino::Utf8PathBuf;
|
|
||||||
pub use utilization::PageserverUtilization;
|
pub use utilization::PageserverUtilization;
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
@@ -23,7 +21,6 @@ use utils::{
|
|||||||
completion,
|
completion,
|
||||||
id::{NodeId, TenantId, TimelineId},
|
id::{NodeId, TenantId, TimelineId},
|
||||||
lsn::Lsn,
|
lsn::Lsn,
|
||||||
postgres_client::PostgresClientProtocol,
|
|
||||||
serde_system_time,
|
serde_system_time,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -230,9 +227,6 @@ pub enum TimelineCreateRequestMode {
|
|||||||
// we continue to accept it by having it here.
|
// we continue to accept it by having it here.
|
||||||
pg_version: Option<u32>,
|
pg_version: Option<u32>,
|
||||||
},
|
},
|
||||||
ImportPgdata {
|
|
||||||
import_pgdata: TimelineCreateRequestModeImportPgdata,
|
|
||||||
},
|
|
||||||
// NB: Bootstrap is all-optional, and thus the serde(untagged) will cause serde to stop at Bootstrap.
|
// NB: Bootstrap is all-optional, and thus the serde(untagged) will cause serde to stop at Bootstrap.
|
||||||
// (serde picks the first matching enum variant, in declaration order).
|
// (serde picks the first matching enum variant, in declaration order).
|
||||||
Bootstrap {
|
Bootstrap {
|
||||||
@@ -242,42 +236,6 @@ pub enum TimelineCreateRequestMode {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Clone)]
|
|
||||||
pub struct TimelineCreateRequestModeImportPgdata {
|
|
||||||
pub location: ImportPgdataLocation,
|
|
||||||
pub idempotency_key: ImportPgdataIdempotencyKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
|
||||||
pub enum ImportPgdataLocation {
|
|
||||||
#[cfg(feature = "testing")]
|
|
||||||
LocalFs { path: Utf8PathBuf },
|
|
||||||
AwsS3 {
|
|
||||||
region: String,
|
|
||||||
bucket: String,
|
|
||||||
/// A better name for this would be `prefix`; changing requires coordination with cplane.
|
|
||||||
/// See <https://github.com/neondatabase/cloud/issues/20646>.
|
|
||||||
key: String,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Clone)]
|
|
||||||
#[serde(transparent)]
|
|
||||||
pub struct ImportPgdataIdempotencyKey(pub String);
|
|
||||||
|
|
||||||
impl ImportPgdataIdempotencyKey {
|
|
||||||
pub fn random() -> Self {
|
|
||||||
use rand::{distributions::Alphanumeric, Rng};
|
|
||||||
Self(
|
|
||||||
rand::thread_rng()
|
|
||||||
.sample_iter(&Alphanumeric)
|
|
||||||
.take(20)
|
|
||||||
.map(char::from)
|
|
||||||
.collect(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Clone)]
|
#[derive(Serialize, Deserialize, Clone)]
|
||||||
pub struct LsnLeaseRequest {
|
pub struct LsnLeaseRequest {
|
||||||
pub lsn: Lsn,
|
pub lsn: Lsn,
|
||||||
@@ -353,7 +311,6 @@ pub struct TenantConfig {
|
|||||||
pub lsn_lease_length: Option<String>,
|
pub lsn_lease_length: Option<String>,
|
||||||
pub lsn_lease_length_for_ts: Option<String>,
|
pub lsn_lease_length_for_ts: Option<String>,
|
||||||
pub timeline_offloading: Option<bool>,
|
pub timeline_offloading: Option<bool>,
|
||||||
pub wal_receiver_protocol_override: Option<PostgresClientProtocol>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The policy for the aux file storage.
|
/// The policy for the aux file storage.
|
||||||
@@ -501,9 +458,7 @@ pub struct EvictionPolicyLayerAccessThreshold {
|
|||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||||
pub struct ThrottleConfig {
|
pub struct ThrottleConfig {
|
||||||
/// See [`ThrottleConfigTaskKinds`] for why we do the serde `rename`.
|
pub task_kinds: Vec<String>, // TaskKind
|
||||||
#[serde(rename = "task_kinds")]
|
|
||||||
pub enabled: ThrottleConfigTaskKinds,
|
|
||||||
pub initial: u32,
|
pub initial: u32,
|
||||||
#[serde(with = "humantime_serde")]
|
#[serde(with = "humantime_serde")]
|
||||||
pub refill_interval: Duration,
|
pub refill_interval: Duration,
|
||||||
@@ -511,38 +466,10 @@ pub struct ThrottleConfig {
|
|||||||
pub max: u32,
|
pub max: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Before <https://github.com/neondatabase/neon/pull/9962>
|
|
||||||
/// the throttle was a per `Timeline::get`/`Timeline::get_vectored` call.
|
|
||||||
/// The `task_kinds` field controlled which Pageserver "Task Kind"s
|
|
||||||
/// were subject to the throttle.
|
|
||||||
///
|
|
||||||
/// After that PR, the throttle is applied at pagestream request level
|
|
||||||
/// and the `task_kinds` field does not apply since the only task kind
|
|
||||||
/// that us subject to the throttle is that of the page service.
|
|
||||||
///
|
|
||||||
/// However, we don't want to make a breaking config change right now
|
|
||||||
/// because it means we have to migrate all the tenant configs.
|
|
||||||
/// This will be done in a future PR.
|
|
||||||
///
|
|
||||||
/// In the meantime, we use emptiness / non-emptiness of the `task_kinds`
|
|
||||||
/// field to determine if the throttle is enabled or not.
|
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
|
|
||||||
#[serde(transparent)]
|
|
||||||
pub struct ThrottleConfigTaskKinds(Vec<String>);
|
|
||||||
|
|
||||||
impl ThrottleConfigTaskKinds {
|
|
||||||
pub fn disabled() -> Self {
|
|
||||||
Self(vec![])
|
|
||||||
}
|
|
||||||
pub fn is_enabled(&self) -> bool {
|
|
||||||
!self.0.is_empty()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ThrottleConfig {
|
impl ThrottleConfig {
|
||||||
pub fn disabled() -> Self {
|
pub fn disabled() -> Self {
|
||||||
Self {
|
Self {
|
||||||
enabled: ThrottleConfigTaskKinds::disabled(),
|
task_kinds: vec![], // effectively disables the throttle
|
||||||
// other values don't matter with empty `task_kinds`.
|
// other values don't matter with empty `task_kinds`.
|
||||||
initial: 0,
|
initial: 0,
|
||||||
refill_interval: Duration::from_millis(1),
|
refill_interval: Duration::from_millis(1),
|
||||||
@@ -556,30 +483,6 @@ impl ThrottleConfig {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod throttle_config_tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_disabled_is_disabled() {
|
|
||||||
let config = ThrottleConfig::disabled();
|
|
||||||
assert!(!config.enabled.is_enabled());
|
|
||||||
}
|
|
||||||
#[test]
|
|
||||||
fn test_enabled_backwards_compat() {
|
|
||||||
let input = serde_json::json!({
|
|
||||||
"task_kinds": ["PageRequestHandler"],
|
|
||||||
"initial": 40000,
|
|
||||||
"refill_interval": "50ms",
|
|
||||||
"refill_amount": 1000,
|
|
||||||
"max": 40000,
|
|
||||||
"fair": true
|
|
||||||
});
|
|
||||||
let config: ThrottleConfig = serde_json::from_value(input).unwrap();
|
|
||||||
assert!(config.enabled.is_enabled());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A flattened analog of a `pageserver::tenant::LocationMode`, which
|
/// A flattened analog of a `pageserver::tenant::LocationMode`, which
|
||||||
/// lists out all possible states (and the virtual "Detached" state)
|
/// lists out all possible states (and the virtual "Detached" state)
|
||||||
/// in a flat form rather than using rust-style enums.
|
/// in a flat form rather than using rust-style enums.
|
||||||
|
|||||||
@@ -158,8 +158,7 @@ impl ShardIdentity {
|
|||||||
key_to_shard_number(self.count, self.stripe_size, key)
|
key_to_shard_number(self.count, self.stripe_size, key)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return true if the key is stored only on this shard. This does not include
|
/// Return true if the key should be ingested by this shard
|
||||||
/// global keys, see is_key_global().
|
|
||||||
///
|
///
|
||||||
/// Shards must ingest _at least_ keys which return true from this check.
|
/// Shards must ingest _at least_ keys which return true from this check.
|
||||||
pub fn is_key_local(&self, key: &Key) -> bool {
|
pub fn is_key_local(&self, key: &Key) -> bool {
|
||||||
@@ -171,37 +170,19 @@ impl ShardIdentity {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return true if the key should be stored on all shards, not just one.
|
|
||||||
pub fn is_key_global(&self, key: &Key) -> bool {
|
|
||||||
if key.is_slru_block_key() || key.is_slru_segment_size_key() || key.is_aux_file_key() {
|
|
||||||
// Special keys that are only stored on shard 0
|
|
||||||
false
|
|
||||||
} else if key.is_rel_block_key() {
|
|
||||||
// Ordinary relation blocks are distributed across shards
|
|
||||||
false
|
|
||||||
} else if key.is_rel_size_key() {
|
|
||||||
// All shards maintain rel size keys (although only shard 0 is responsible for
|
|
||||||
// keeping it strictly accurate, other shards just reflect the highest block they've ingested)
|
|
||||||
true
|
|
||||||
} else {
|
|
||||||
// For everything else, we assume it must be kept everywhere, because ingest code
|
|
||||||
// might assume this -- this covers functionality where the ingest code has
|
|
||||||
// not (yet) been made fully shard aware.
|
|
||||||
true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return true if the key should be discarded if found in this shard's
|
/// Return true if the key should be discarded if found in this shard's
|
||||||
/// data store, e.g. during compaction after a split.
|
/// data store, e.g. during compaction after a split.
|
||||||
///
|
///
|
||||||
/// Shards _may_ drop keys which return false here, but are not obliged to.
|
/// Shards _may_ drop keys which return false here, but are not obliged to.
|
||||||
pub fn is_key_disposable(&self, key: &Key) -> bool {
|
pub fn is_key_disposable(&self, key: &Key) -> bool {
|
||||||
if self.count < ShardCount(2) {
|
if key_is_shard0(key) {
|
||||||
// Fast path: unsharded tenant doesn't dispose of anything
|
// Q: Why can't we dispose of shard0 content if we're not shard 0?
|
||||||
return false;
|
// A1: because the WAL ingestion logic currently ingests some shard 0
|
||||||
}
|
// content on all shards, even though it's only read on shard 0. If we
|
||||||
|
// dropped it, then subsequent WAL ingest to these keys would encounter
|
||||||
if self.is_key_global(key) {
|
// an error.
|
||||||
|
// A2: because key_is_shard0 also covers relation size keys, which are written
|
||||||
|
// on all shards even though they're only maintained accurately on shard 0.
|
||||||
false
|
false
|
||||||
} else {
|
} else {
|
||||||
!self.is_key_local(key)
|
!self.is_key_local(key)
|
||||||
|
|||||||
@@ -716,9 +716,6 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> PostgresBackend<IO> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Proto looks like this:
|
|
||||||
// FeMessage::Query("pagestream_v2{FeMessage::CopyData(PagestreamFeMessage::GetPage(..))}")
|
|
||||||
|
|
||||||
async fn process_message(
|
async fn process_message(
|
||||||
&mut self,
|
&mut self,
|
||||||
handler: &mut impl Handler<IO>,
|
handler: &mut impl Handler<IO>,
|
||||||
@@ -834,7 +831,7 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> PostgresBackend<IO> {
|
|||||||
use CopyStreamHandlerEnd::*;
|
use CopyStreamHandlerEnd::*;
|
||||||
|
|
||||||
let expected_end = match &end {
|
let expected_end = match &end {
|
||||||
ServerInitiated(_) | CopyDone | CopyFail | Terminate | EOF | Cancelled => true,
|
ServerInitiated(_) | CopyDone | CopyFail | Terminate | EOF => true,
|
||||||
CopyStreamHandlerEnd::Disconnected(ConnectionError::Io(io_error))
|
CopyStreamHandlerEnd::Disconnected(ConnectionError::Io(io_error))
|
||||||
if is_expected_io_error(io_error) =>
|
if is_expected_io_error(io_error) =>
|
||||||
{
|
{
|
||||||
@@ -874,9 +871,6 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> PostgresBackend<IO> {
|
|||||||
// message from server' when it receives ErrorResponse (anything but
|
// message from server' when it receives ErrorResponse (anything but
|
||||||
// CopyData/CopyDone) back.
|
// CopyData/CopyDone) back.
|
||||||
CopyFail => Some((end.to_string(), SQLSTATE_SUCCESSFUL_COMPLETION)),
|
CopyFail => Some((end.to_string(), SQLSTATE_SUCCESSFUL_COMPLETION)),
|
||||||
|
|
||||||
// When cancelled, send no response: we must not risk blocking on sending that response
|
|
||||||
Cancelled => None,
|
|
||||||
_ => None,
|
_ => None,
|
||||||
};
|
};
|
||||||
if let Some((err, errcode)) = err_to_send_and_errcode {
|
if let Some((err, errcode)) = err_to_send_and_errcode {
|
||||||
@@ -1054,8 +1048,6 @@ pub enum CopyStreamHandlerEnd {
|
|||||||
/// The connection was lost
|
/// The connection was lost
|
||||||
#[error("connection error: {0}")]
|
#[error("connection error: {0}")]
|
||||||
Disconnected(#[from] ConnectionError),
|
Disconnected(#[from] ConnectionError),
|
||||||
#[error("Shutdown")]
|
|
||||||
Cancelled,
|
|
||||||
/// Some other error
|
/// Some other error
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
Other(#[from] anyhow::Error),
|
Other(#[from] anyhow::Error),
|
||||||
|
|||||||
@@ -1,12 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "postgres_initdb"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition.workspace = true
|
|
||||||
license.workspace = true
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
anyhow.workspace = true
|
|
||||||
tokio.workspace = true
|
|
||||||
camino.workspace = true
|
|
||||||
thiserror.workspace = true
|
|
||||||
workspace_hack = { version = "0.1", path = "../../workspace_hack" }
|
|
||||||
@@ -1,103 +0,0 @@
|
|||||||
//! The canonical way we run `initdb` in Neon.
|
|
||||||
//!
|
|
||||||
//! initdb has implicit defaults that are dependent on the environment, e.g., locales & collations.
|
|
||||||
//!
|
|
||||||
//! This module's job is to eliminate the environment-dependence as much as possible.
|
|
||||||
|
|
||||||
use std::fmt;
|
|
||||||
|
|
||||||
use camino::Utf8Path;
|
|
||||||
|
|
||||||
pub struct RunInitdbArgs<'a> {
|
|
||||||
pub superuser: &'a str,
|
|
||||||
pub locale: &'a str,
|
|
||||||
pub initdb_bin: &'a Utf8Path,
|
|
||||||
pub pg_version: u32,
|
|
||||||
pub library_search_path: &'a Utf8Path,
|
|
||||||
pub pgdata: &'a Utf8Path,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(thiserror::Error, Debug)]
|
|
||||||
pub enum Error {
|
|
||||||
Spawn(std::io::Error),
|
|
||||||
Failed {
|
|
||||||
status: std::process::ExitStatus,
|
|
||||||
stderr: Vec<u8>,
|
|
||||||
},
|
|
||||||
WaitOutput(std::io::Error),
|
|
||||||
Other(anyhow::Error),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for Error {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
match self {
|
|
||||||
Error::Spawn(e) => write!(f, "Error spawning command: {:?}", e),
|
|
||||||
Error::Failed { status, stderr } => write!(
|
|
||||||
f,
|
|
||||||
"Command failed with status {:?}: {}",
|
|
||||||
status,
|
|
||||||
String::from_utf8_lossy(stderr)
|
|
||||||
),
|
|
||||||
Error::WaitOutput(e) => write!(f, "Error waiting for command output: {:?}", e),
|
|
||||||
Error::Other(e) => write!(f, "Error: {:?}", e),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn do_run_initdb(args: RunInitdbArgs<'_>) -> Result<(), Error> {
|
|
||||||
let RunInitdbArgs {
|
|
||||||
superuser,
|
|
||||||
locale,
|
|
||||||
initdb_bin: initdb_bin_path,
|
|
||||||
pg_version,
|
|
||||||
library_search_path,
|
|
||||||
pgdata,
|
|
||||||
} = args;
|
|
||||||
let mut initdb_command = tokio::process::Command::new(initdb_bin_path);
|
|
||||||
initdb_command
|
|
||||||
.args(["--pgdata", pgdata.as_ref()])
|
|
||||||
.args(["--username", superuser])
|
|
||||||
.args(["--encoding", "utf8"])
|
|
||||||
.args(["--locale", locale])
|
|
||||||
.arg("--no-instructions")
|
|
||||||
.arg("--no-sync")
|
|
||||||
.env_clear()
|
|
||||||
.env("LD_LIBRARY_PATH", library_search_path)
|
|
||||||
.env("DYLD_LIBRARY_PATH", library_search_path)
|
|
||||||
.stdin(std::process::Stdio::null())
|
|
||||||
// stdout invocation produces the same output every time, we don't need it
|
|
||||||
.stdout(std::process::Stdio::null())
|
|
||||||
// we would be interested in the stderr output, if there was any
|
|
||||||
.stderr(std::process::Stdio::piped());
|
|
||||||
|
|
||||||
// Before version 15, only the libc provider was available.
|
|
||||||
if pg_version > 14 {
|
|
||||||
// Version 17 brought with it a builtin locale provider which only provides
|
|
||||||
// C and C.UTF-8. While being safer for collation purposes since it is
|
|
||||||
// guaranteed to be consistent throughout a major release, it is also more
|
|
||||||
// performant.
|
|
||||||
let locale_provider = if pg_version >= 17 { "builtin" } else { "libc" };
|
|
||||||
|
|
||||||
initdb_command.args(["--locale-provider", locale_provider]);
|
|
||||||
}
|
|
||||||
|
|
||||||
let initdb_proc = initdb_command.spawn().map_err(Error::Spawn)?;
|
|
||||||
|
|
||||||
// Ideally we'd select here with the cancellation token, but the problem is that
|
|
||||||
// we can't safely terminate initdb: it launches processes of its own, and killing
|
|
||||||
// initdb doesn't kill them. After we return from this function, we want the target
|
|
||||||
// directory to be able to be cleaned up.
|
|
||||||
// See https://github.com/neondatabase/neon/issues/6385
|
|
||||||
let initdb_output = initdb_proc
|
|
||||||
.wait_with_output()
|
|
||||||
.await
|
|
||||||
.map_err(Error::WaitOutput)?;
|
|
||||||
if !initdb_output.status.success() {
|
|
||||||
return Err(Error::Failed {
|
|
||||||
status: initdb_output.status,
|
|
||||||
stderr: initdb_output.stderr,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
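For reference, a hedged usage sketch of the `postgres_initdb` crate listed in this hunk, using only the `RunInitdbArgs`/`do_run_initdb` items shown above; the binary paths, locale, and superuser name are illustrative values, not canonical ones:

```rust
// Example driver for do_run_initdb; all paths and names below are placeholders.
use camino::Utf8Path;
use postgres_initdb::{do_run_initdb, RunInitdbArgs};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    do_run_initdb(RunInitdbArgs {
        superuser: "cloud_admin",
        locale: "C.UTF-8",
        initdb_bin: Utf8Path::new("/usr/local/v16/bin/initdb"),
        pg_version: 16,
        library_search_path: Utf8Path::new("/usr/local/v16/lib"),
        pgdata: Utf8Path::new("/tmp/pgdata"),
    })
    .await?;
    Ok(())
}
```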
@@ -100,7 +100,7 @@ impl StartupMessageParamsBuilder {
|
|||||||
|
|
||||||
#[derive(Debug, Clone, Default)]
|
#[derive(Debug, Clone, Default)]
|
||||||
pub struct StartupMessageParams {
|
pub struct StartupMessageParams {
|
||||||
pub params: Bytes,
|
params: Bytes,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl StartupMessageParams {
|
impl StartupMessageParams {
|
||||||
@@ -185,7 +185,7 @@ pub struct CancelKeyData {
|
|||||||
impl fmt::Display for CancelKeyData {
|
impl fmt::Display for CancelKeyData {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
let hi = (self.backend_pid as u64) << 32;
|
let hi = (self.backend_pid as u64) << 32;
|
||||||
let lo = (self.cancel_key as u64) & 0xffffffff;
|
let lo = self.cancel_key as u64;
|
||||||
let id = hi | lo;
|
let id = hi | lo;
|
||||||
|
|
||||||
// This format is more compact and might work better for logs.
|
// This format is more compact and might work better for logs.
|
||||||
@@ -562,11 +562,6 @@ pub enum BeMessage<'a> {
|
|||||||
options: &'a [&'a str],
|
options: &'a [&'a str],
|
||||||
},
|
},
|
||||||
KeepAlive(WalSndKeepAlive),
|
KeepAlive(WalSndKeepAlive),
|
||||||
/// Batch of interpreted, shard filtered WAL records,
|
|
||||||
/// ready for the pageserver to ingest
|
|
||||||
InterpretedWalRecords(InterpretedWalRecordsBody<'a>),
|
|
||||||
|
|
||||||
Raw(u8, &'a [u8]),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Common shorthands.
|
/// Common shorthands.
|
||||||
@@ -677,22 +672,6 @@ pub struct WalSndKeepAlive {
|
|||||||
pub request_reply: bool,
|
pub request_reply: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Batch of interpreted WAL records used in the interpreted
|
|
||||||
/// safekeeper to pageserver protocol.
|
|
||||||
///
|
|
||||||
/// Note that the pageserver uses the RawInterpretedWalRecordsBody
|
|
||||||
/// counterpart of this from the neondatabase/rust-postgres repo.
|
|
||||||
/// If you're changing this struct, you likely need to change its
|
|
||||||
/// twin as well.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct InterpretedWalRecordsBody<'a> {
|
|
||||||
/// End of raw WAL in [`Self::data`]
|
|
||||||
pub streaming_lsn: u64,
|
|
||||||
/// Current end of WAL on the server
|
|
||||||
pub commit_lsn: u64,
|
|
||||||
pub data: &'a [u8],
|
|
||||||
}
|
|
||||||
|
|
||||||
pub static HELLO_WORLD_ROW: BeMessage = BeMessage::DataRow(&[Some(b"hello world")]);
|
pub static HELLO_WORLD_ROW: BeMessage = BeMessage::DataRow(&[Some(b"hello world")]);
|
||||||
|
|
||||||
// single text column
|
// single text column
|
||||||
@@ -756,10 +735,6 @@ impl BeMessage<'_> {
|
|||||||
/// one more buffer.
|
/// one more buffer.
|
||||||
pub fn write(buf: &mut BytesMut, message: &BeMessage) -> Result<(), ProtocolError> {
|
pub fn write(buf: &mut BytesMut, message: &BeMessage) -> Result<(), ProtocolError> {
|
||||||
match message {
|
match message {
|
||||||
BeMessage::Raw(code, data) => {
|
|
||||||
buf.put_u8(*code);
|
|
||||||
write_body(buf, |b| b.put_slice(data))
|
|
||||||
}
|
|
||||||
BeMessage::AuthenticationOk => {
|
BeMessage::AuthenticationOk => {
|
||||||
buf.put_u8(b'R');
|
buf.put_u8(b'R');
|
||||||
write_body(buf, |buf| {
|
write_body(buf, |buf| {
|
||||||
@@ -1021,19 +996,6 @@ impl BeMessage<'_> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
})?
|
})?
|
||||||
}
|
}
|
||||||
|
|
||||||
BeMessage::InterpretedWalRecords(rec) => {
|
|
||||||
// We use the COPY_DATA_TAG for our custom message
|
|
||||||
// since this tag is interpreted as raw bytes.
|
|
||||||
buf.put_u8(b'd');
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
buf.put_u8(b'0'); // matches INTERPRETED_WAL_RECORD_TAG in postgres-protocol
|
|
||||||
// dependency
|
|
||||||
buf.put_u64(rec.streaming_lsn);
|
|
||||||
buf.put_u64(rec.commit_lsn);
|
|
||||||
buf.put_slice(rec.data);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
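The `InterpretedWalRecords` write arm above frames each batch as a CopyData message: tag byte `d`, the usual 4-byte length, then `0` (the interpreted-record tag), two big-endian u64 LSNs, and the payload. A small sketch of that framing, assuming the `bytes` crate; the LSN values and payload are placeholders:

```rust
// CopyData framing for interpreted WAL record batches, as in the write path above.
use bytes::{BufMut, BytesMut};

fn main() {
    let (streaming_lsn, commit_lsn) = (0x1690000028u64, 0x1690000060u64);
    let data: &[u8] = b"<interpreted records>";

    let mut buf = BytesMut::new();
    buf.put_u8(b'd'); // CopyData tag
    // The 4-byte length covers itself plus the body (postgres wire convention).
    buf.put_u32((4 + 1 + 8 + 8 + data.len()) as u32);
    buf.put_u8(b'0'); // INTERPRETED_WAL_RECORD_TAG
    buf.put_u64(streaming_lsn); // end of raw WAL contained in `data`
    buf.put_u64(commit_lsn); // current end of WAL on the server
    buf.put_slice(data);

    assert_eq!(buf.len(), 1 + 4 + 1 + 8 + 8 + data.len());
}
```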
@@ -1084,13 +1046,4 @@ mod tests {
|
|||||||
let data = [0, 0, 0, 7, 0, 0, 0, 0];
|
let data = [0, 0, 0, 7, 0, 0, 0, 0];
|
||||||
FeStartupPacket::parse(&mut BytesMut::from_iter(data)).unwrap_err();
|
FeStartupPacket::parse(&mut BytesMut::from_iter(data)).unwrap_err();
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn cancel_key_data() {
|
|
||||||
let key = CancelKeyData {
|
|
||||||
backend_pid: -1817212860,
|
|
||||||
cancel_key: -1183897012,
|
|
||||||
};
|
|
||||||
assert_eq!(format!("{key}"), "CancelKeyData(93af8844b96f2a4c)");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +0,0 @@
|
|||||||
This directory contains libraries that are specific for proxy.
|
|
||||||
|
|
||||||
Currently, it contains a significant fork/refactoring of rust-postgres that no longer reflects the API
|
|
||||||
of the original library. Since it was so significant, it made sense to upgrade it to its own set of libraries.
|
|
||||||
|
|
||||||
Proxy needs unique access to the protocol, which explains why such heavy modifications were necessary.
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "postgres-protocol2"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2018"
|
|
||||||
license = "MIT/Apache-2.0"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
base64 = "0.20"
|
|
||||||
byteorder.workspace = true
|
|
||||||
bytes.workspace = true
|
|
||||||
fallible-iterator.workspace = true
|
|
||||||
hmac.workspace = true
|
|
||||||
memchr = "2.0"
|
|
||||||
rand.workspace = true
|
|
||||||
sha2.workspace = true
|
|
||||||
stringprep = "0.1"
|
|
||||||
tokio = { workspace = true, features = ["rt"] }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
tokio = { workspace = true, features = ["full"] }
|
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
//! Authentication protocol support.
|
|
||||||
pub mod sasl;
|
|
||||||
@@ -1,516 +0,0 @@
|
|||||||
//! SASL-based authentication support.
|
|
||||||
|
|
||||||
use hmac::{Hmac, Mac};
|
|
||||||
use rand::{self, Rng};
|
|
||||||
use sha2::digest::FixedOutput;
|
|
||||||
use sha2::{Digest, Sha256};
|
|
||||||
use std::fmt::Write;
|
|
||||||
use std::io;
|
|
||||||
use std::iter;
|
|
||||||
use std::mem;
|
|
||||||
use std::str;
|
|
||||||
use tokio::task::yield_now;
|
|
||||||
|
|
||||||
const NONCE_LENGTH: usize = 24;
|
|
||||||
|
|
||||||
/// The identifier of the SCRAM-SHA-256 SASL authentication mechanism.
|
|
||||||
pub const SCRAM_SHA_256: &str = "SCRAM-SHA-256";
|
|
||||||
/// The identifier of the SCRAM-SHA-256-PLUS SASL authentication mechanism.
|
|
||||||
pub const SCRAM_SHA_256_PLUS: &str = "SCRAM-SHA-256-PLUS";
|
|
||||||
|
|
||||||
// since postgres passwords are not required to exclude saslprep-prohibited
|
|
||||||
// characters or even be valid UTF8, we run saslprep if possible and otherwise
|
|
||||||
// return the raw password.
|
|
||||||
fn normalize(pass: &[u8]) -> Vec<u8> {
|
|
||||||
let pass = match str::from_utf8(pass) {
|
|
||||||
Ok(pass) => pass,
|
|
||||||
Err(_) => return pass.to_vec(),
|
|
||||||
};
|
|
||||||
|
|
||||||
match stringprep::saslprep(pass) {
|
|
||||||
Ok(pass) => pass.into_owned().into_bytes(),
|
|
||||||
Err(_) => pass.as_bytes().to_vec(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) async fn hi(str: &[u8], salt: &[u8], iterations: u32) -> [u8; 32] {
|
|
||||||
let mut hmac =
|
|
||||||
Hmac::<Sha256>::new_from_slice(str).expect("HMAC is able to accept all key sizes");
|
|
||||||
hmac.update(salt);
|
|
||||||
hmac.update(&[0, 0, 0, 1]);
|
|
||||||
let mut prev = hmac.finalize().into_bytes();
|
|
||||||
|
|
||||||
let mut hi = prev;
|
|
||||||
|
|
||||||
for i in 1..iterations {
|
|
||||||
let mut hmac = Hmac::<Sha256>::new_from_slice(str).expect("already checked above");
|
|
||||||
hmac.update(&prev);
|
|
||||||
prev = hmac.finalize().into_bytes();
|
|
||||||
|
|
||||||
for (hi, prev) in hi.iter_mut().zip(prev) {
|
|
||||||
*hi ^= prev;
|
|
||||||
}
|
|
||||||
// yield every ~250us
|
|
||||||
// hopefully reduces tail latencies
|
|
||||||
if i % 1024 == 0 {
|
|
||||||
yield_now().await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
hi.into()
|
|
||||||
}
|
|
||||||
|
|
||||||
enum ChannelBindingInner {
|
|
||||||
Unrequested,
|
|
||||||
Unsupported,
|
|
||||||
TlsServerEndPoint(Vec<u8>),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The channel binding configuration for a SCRAM authentication exchange.
|
|
||||||
pub struct ChannelBinding(ChannelBindingInner);
|
|
||||||
|
|
||||||
impl ChannelBinding {
|
|
||||||
/// The server did not request channel binding.
|
|
||||||
pub fn unrequested() -> ChannelBinding {
|
|
||||||
ChannelBinding(ChannelBindingInner::Unrequested)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The server requested channel binding but the client is unable to provide it.
|
|
||||||
pub fn unsupported() -> ChannelBinding {
|
|
||||||
ChannelBinding(ChannelBindingInner::Unsupported)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The server requested channel binding and the client will use the `tls-server-end-point`
|
|
||||||
/// method.
|
|
||||||
pub fn tls_server_end_point(signature: Vec<u8>) -> ChannelBinding {
|
|
||||||
ChannelBinding(ChannelBindingInner::TlsServerEndPoint(signature))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn gs2_header(&self) -> &'static str {
|
|
||||||
match self.0 {
|
|
||||||
ChannelBindingInner::Unrequested => "y,,",
|
|
||||||
ChannelBindingInner::Unsupported => "n,,",
|
|
||||||
ChannelBindingInner::TlsServerEndPoint(_) => "p=tls-server-end-point,,",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cbind_data(&self) -> &[u8] {
|
|
||||||
match self.0 {
|
|
||||||
ChannelBindingInner::Unrequested | ChannelBindingInner::Unsupported => &[],
|
|
||||||
ChannelBindingInner::TlsServerEndPoint(ref buf) => buf,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A pair of keys for the SCRAM-SHA-256 mechanism.
|
|
||||||
/// See <https://datatracker.ietf.org/doc/html/rfc5802#section-3> for details.
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
|
||||||
pub struct ScramKeys<const N: usize> {
|
|
||||||
/// Used by server to authenticate client.
|
|
||||||
pub client_key: [u8; N],
|
|
||||||
/// Used by client to verify server's signature.
|
|
||||||
pub server_key: [u8; N],
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Password or keys which were derived from it.
|
|
||||||
enum Credentials<const N: usize> {
|
|
||||||
/// A regular password as a vector of bytes.
|
|
||||||
Password(Vec<u8>),
|
|
||||||
/// A precomputed pair of keys.
|
|
||||||
Keys(ScramKeys<N>),
|
|
||||||
}
|
|
||||||
|
|
||||||
enum State {
|
|
||||||
Update {
|
|
||||||
nonce: String,
|
|
||||||
password: Credentials<32>,
|
|
||||||
channel_binding: ChannelBinding,
|
|
||||||
},
|
|
||||||
Finish {
|
|
||||||
server_key: [u8; 32],
|
|
||||||
auth_message: String,
|
|
||||||
},
|
|
||||||
Done,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A type which handles the client side of the SCRAM-SHA-256/SCRAM-SHA-256-PLUS authentication
|
|
||||||
/// process.
|
|
||||||
///
|
|
||||||
/// During the authentication process, if the backend sends an `AuthenticationSASL` message which
|
|
||||||
/// includes `SCRAM-SHA-256` as an authentication mechanism, this type can be used.
|
|
||||||
///
|
|
||||||
/// After a `ScramSha256` is constructed, the buffer returned by the `message()` method should be
|
|
||||||
/// sent to the backend in a `SASLInitialResponse` message along with the mechanism name.
|
|
||||||
///
|
|
||||||
/// The server will reply with an `AuthenticationSASLContinue` message. Its contents should be
|
|
||||||
/// passed to the `update()` method, after which the buffer returned by the `message()` method
|
|
||||||
/// should be sent to the backend in a `SASLResponse` message.
|
|
||||||
///
|
|
||||||
/// The server will reply with an `AuthenticationSASLFinal` message. Its contents should be passed
|
|
||||||
/// to the `finish()` method, after which the authentication process is complete.
|
|
||||||
pub struct ScramSha256 {
|
|
||||||
message: String,
|
|
||||||
state: State,
|
|
||||||
}
|
|
||||||
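The doc comment above spells out the client-side message flow. A hedged driver sketch using the `ScramSha256`/`ChannelBinding` API from this file; all transport plumbing (sending `SASLInitialResponse`/`SASLResponse`, reading server messages) is omitted, and the two byte-slice parameters stand in for the server's replies:

```rust
// Sketch of the SCRAM-SHA-256 client exchange described above, assuming the
// types defined in this module. `server_first`/`server_final` are the payloads
// of AuthenticationSASLContinue and AuthenticationSASLFinal respectively.
async fn scram_exchange_sketch(
    password: &[u8],
    server_first: &[u8],
    server_final: &[u8],
) -> std::io::Result<(Vec<u8>, Vec<u8>)> {
    let mut scram = ScramSha256::new(password, ChannelBinding::unsupported());

    // client-first-message: sent in SASLInitialResponse with the SCRAM_SHA_256 mechanism.
    let client_first = scram.message().to_vec();

    // Feed in server-first-message, then send the produced client-final-message
    // as a SASLResponse.
    scram.update(server_first).await?;
    let client_final = scram.message().to_vec();

    // Verify the server signature; authentication succeeded only if this is Ok(()).
    scram.finish(server_final)?;

    Ok((client_first, client_final))
}
```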
|
|
||||||
fn nonce() -> String {
|
|
||||||
// rand 0.5's ThreadRng is cryptographically secure
|
|
||||||
let mut rng = rand::thread_rng();
|
|
||||||
(0..NONCE_LENGTH)
|
|
||||||
.map(|_| {
|
|
||||||
let mut v = rng.gen_range(0x21u8..0x7e);
|
|
||||||
if v == 0x2c {
|
|
||||||
v = 0x7e
|
|
||||||
}
|
|
||||||
v as char
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ScramSha256 {
|
|
||||||
/// Constructs a new instance which will use the provided password for authentication.
|
|
||||||
pub fn new(password: &[u8], channel_binding: ChannelBinding) -> ScramSha256 {
|
|
||||||
let password = Credentials::Password(normalize(password));
|
|
||||||
ScramSha256::new_inner(password, channel_binding, nonce())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Constructs a new instance which will use the provided key pair for authentication.
|
|
||||||
pub fn new_with_keys(keys: ScramKeys<32>, channel_binding: ChannelBinding) -> ScramSha256 {
|
|
||||||
let password = Credentials::Keys(keys);
|
|
||||||
ScramSha256::new_inner(password, channel_binding, nonce())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_inner(
|
|
||||||
password: Credentials<32>,
|
|
||||||
channel_binding: ChannelBinding,
|
|
||||||
nonce: String,
|
|
||||||
) -> ScramSha256 {
|
|
||||||
ScramSha256 {
|
|
||||||
message: format!("{}n=,r={}", channel_binding.gs2_header(), nonce),
|
|
||||||
state: State::Update {
|
|
||||||
nonce,
|
|
||||||
password,
|
|
||||||
channel_binding,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the message which should be sent to the backend in an `SASLResponse` message.
|
|
||||||
pub fn message(&self) -> &[u8] {
|
|
||||||
if let State::Done = self.state {
|
|
||||||
panic!("invalid SCRAM state");
|
|
||||||
}
|
|
||||||
self.message.as_bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Updates the state machine with the response from the backend.
|
|
||||||
///
|
|
||||||
/// This should be called when an `AuthenticationSASLContinue` message is received.
|
|
||||||
pub async fn update(&mut self, message: &[u8]) -> io::Result<()> {
|
|
||||||
let (client_nonce, password, channel_binding) =
|
|
||||||
match mem::replace(&mut self.state, State::Done) {
|
|
||||||
State::Update {
|
|
||||||
nonce,
|
|
||||||
password,
|
|
||||||
channel_binding,
|
|
||||||
} => (nonce, password, channel_binding),
|
|
||||||
_ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")),
|
|
||||||
};
|
|
||||||
|
|
||||||
let message =
|
|
||||||
str::from_utf8(message).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
|
|
||||||
|
|
||||||
let parsed = Parser::new(message).server_first_message()?;
|
|
||||||
|
|
||||||
if !parsed.nonce.starts_with(&client_nonce) {
|
|
||||||
return Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid nonce"));
|
|
||||||
}
|
|
||||||
|
|
||||||
let (client_key, server_key) = match password {
|
|
||||||
Credentials::Password(password) => {
|
|
||||||
let salt = match base64::decode(parsed.salt) {
|
|
||||||
Ok(salt) => salt,
|
|
||||||
Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
|
|
||||||
};
|
|
||||||
|
|
||||||
let salted_password = hi(&password, &salt, parsed.iteration_count).await;
|
|
||||||
|
|
||||||
let make_key = |name| {
|
|
||||||
let mut hmac = Hmac::<Sha256>::new_from_slice(&salted_password)
|
|
||||||
.expect("HMAC is able to accept all key sizes");
|
|
||||||
hmac.update(name);
|
|
||||||
|
|
||||||
let mut key = [0u8; 32];
|
|
||||||
key.copy_from_slice(hmac.finalize().into_bytes().as_slice());
|
|
||||||
key
|
|
||||||
};
|
|
||||||
|
|
||||||
(make_key(b"Client Key"), make_key(b"Server Key"))
|
|
||||||
}
|
|
||||||
Credentials::Keys(keys) => (keys.client_key, keys.server_key),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut hash = Sha256::default();
|
|
||||||
hash.update(client_key);
|
|
||||||
let stored_key = hash.finalize_fixed();
|
|
||||||
|
|
||||||
let mut cbind_input = vec![];
|
|
||||||
cbind_input.extend(channel_binding.gs2_header().as_bytes());
|
|
||||||
cbind_input.extend(channel_binding.cbind_data());
|
|
||||||
let cbind_input = base64::encode(&cbind_input);
|
|
||||||
|
|
||||||
self.message.clear();
|
|
||||||
write!(&mut self.message, "c={},r={}", cbind_input, parsed.nonce).unwrap();
|
|
||||||
|
|
||||||
let auth_message = format!("n=,r={},{},{}", client_nonce, message, self.message);
|
|
||||||
|
|
||||||
let mut hmac = Hmac::<Sha256>::new_from_slice(&stored_key)
|
|
||||||
.expect("HMAC is able to accept all key sizes");
|
|
||||||
hmac.update(auth_message.as_bytes());
|
|
||||||
let client_signature = hmac.finalize().into_bytes();
|
|
||||||
|
|
||||||
let mut client_proof = client_key;
|
|
||||||
for (proof, signature) in client_proof.iter_mut().zip(client_signature) {
|
|
||||||
*proof ^= signature;
|
|
||||||
}
|
|
||||||
|
|
||||||
write!(&mut self.message, ",p={}", base64::encode(client_proof)).unwrap();
|
|
||||||
|
|
||||||
self.state = State::Finish {
|
|
||||||
server_key,
|
|
||||||
auth_message,
|
|
||||||
};
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Finalizes the authentication process.
|
|
||||||
///
|
|
||||||
/// This should be called when the backend sends an `AuthenticationSASLFinal` message.
|
|
||||||
/// Authentication has only succeeded if this method returns `Ok(())`.
|
|
||||||
pub fn finish(&mut self, message: &[u8]) -> io::Result<()> {
|
|
||||||
let (server_key, auth_message) = match mem::replace(&mut self.state, State::Done) {
|
|
||||||
State::Finish {
|
|
||||||
server_key,
|
|
||||||
auth_message,
|
|
||||||
} => (server_key, auth_message),
|
|
||||||
_ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")),
|
|
||||||
};
|
|
||||||
|
|
||||||
let message =
|
|
||||||
str::from_utf8(message).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
|
|
||||||
|
|
||||||
let parsed = Parser::new(message).server_final_message()?;
|
|
||||||
|
|
||||||
let verifier = match parsed {
|
|
||||||
ServerFinalMessage::Error(e) => {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::Other,
|
|
||||||
format!("SCRAM error: {}", e),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
ServerFinalMessage::Verifier(verifier) => verifier,
|
|
||||||
};
|
|
||||||
|
|
||||||
let verifier = match base64::decode(verifier) {
|
|
||||||
Ok(verifier) => verifier,
|
|
||||||
Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut hmac = Hmac::<Sha256>::new_from_slice(&server_key)
|
|
||||||
.expect("HMAC is able to accept all key sizes");
|
|
||||||
hmac.update(auth_message.as_bytes());
|
|
||||||
hmac.verify_slice(&verifier)
|
|
||||||
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "SCRAM verification error"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct Parser<'a> {
|
|
||||||
s: &'a str,
|
|
||||||
it: iter::Peekable<str::CharIndices<'a>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> Parser<'a> {
|
|
||||||
fn new(s: &'a str) -> Parser<'a> {
|
|
||||||
Parser {
|
|
||||||
s,
|
|
||||||
it: s.char_indices().peekable(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn eat(&mut self, target: char) -> io::Result<()> {
|
|
||||||
match self.it.next() {
|
|
||||||
Some((_, c)) if c == target => Ok(()),
|
|
||||||
Some((i, c)) => {
|
|
||||||
let m = format!(
|
|
||||||
"unexpected character at byte {}: expected `{}` but got `{}",
|
|
||||||
i, target, c
|
|
||||||
);
|
|
||||||
Err(io::Error::new(io::ErrorKind::InvalidInput, m))
|
|
||||||
}
|
|
||||||
None => Err(io::Error::new(
|
|
||||||
io::ErrorKind::UnexpectedEof,
|
|
||||||
"unexpected EOF",
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn take_while<F>(&mut self, f: F) -> io::Result<&'a str>
|
|
||||||
where
|
|
||||||
F: Fn(char) -> bool,
|
|
||||||
{
|
|
||||||
let start = match self.it.peek() {
|
|
||||||
Some(&(i, _)) => i,
|
|
||||||
None => return Ok(""),
|
|
||||||
};
|
|
||||||
|
|
||||||
loop {
|
|
||||||
match self.it.peek() {
|
|
||||||
Some(&(_, c)) if f(c) => {
|
|
||||||
self.it.next();
|
|
||||||
}
|
|
||||||
Some(&(i, _)) => return Ok(&self.s[start..i]),
|
|
||||||
None => return Ok(&self.s[start..]),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn printable(&mut self) -> io::Result<&'a str> {
|
|
||||||
self.take_while(|c| matches!(c, '\x21'..='\x2b' | '\x2d'..='\x7e'))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn nonce(&mut self) -> io::Result<&'a str> {
|
|
||||||
self.eat('r')?;
|
|
||||||
self.eat('=')?;
|
|
||||||
self.printable()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn base64(&mut self) -> io::Result<&'a str> {
|
|
||||||
self.take_while(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '/' | '+' | '='))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn salt(&mut self) -> io::Result<&'a str> {
|
|
||||||
self.eat('s')?;
|
|
||||||
self.eat('=')?;
|
|
||||||
self.base64()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn posit_number(&mut self) -> io::Result<u32> {
|
|
||||||
let n = self.take_while(|c| c.is_ascii_digit())?;
|
|
||||||
n.parse()
|
|
||||||
.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn iteration_count(&mut self) -> io::Result<u32> {
|
|
||||||
self.eat('i')?;
|
|
||||||
self.eat('=')?;
|
|
||||||
self.posit_number()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn eof(&mut self) -> io::Result<()> {
|
|
||||||
match self.it.peek() {
|
|
||||||
Some(&(i, _)) => Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
format!("unexpected trailing data at byte {}", i),
|
|
||||||
)),
|
|
||||||
None => Ok(()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn server_first_message(&mut self) -> io::Result<ServerFirstMessage<'a>> {
|
|
||||||
let nonce = self.nonce()?;
|
|
||||||
self.eat(',')?;
|
|
||||||
let salt = self.salt()?;
|
|
||||||
self.eat(',')?;
|
|
||||||
let iteration_count = self.iteration_count()?;
|
|
||||||
self.eof()?;
|
|
||||||
|
|
||||||
Ok(ServerFirstMessage {
|
|
||||||
nonce,
|
|
||||||
salt,
|
|
||||||
iteration_count,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn value(&mut self) -> io::Result<&'a str> {
|
|
||||||
self.take_while(|c| matches!(c, '\0' | '=' | ','))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn server_error(&mut self) -> io::Result<Option<&'a str>> {
|
|
||||||
match self.it.peek() {
|
|
||||||
Some(&(_, 'e')) => {}
|
|
||||||
_ => return Ok(None),
|
|
||||||
}
|
|
||||||
|
|
||||||
self.eat('e')?;
|
|
||||||
self.eat('=')?;
|
|
||||||
self.value().map(Some)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn verifier(&mut self) -> io::Result<&'a str> {
|
|
||||||
self.eat('v')?;
|
|
||||||
self.eat('=')?;
|
|
||||||
self.base64()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn server_final_message(&mut self) -> io::Result<ServerFinalMessage<'a>> {
|
|
||||||
let message = match self.server_error()? {
|
|
||||||
Some(error) => ServerFinalMessage::Error(error),
|
|
||||||
None => ServerFinalMessage::Verifier(self.verifier()?),
|
|
||||||
};
|
|
||||||
self.eof()?;
|
|
||||||
Ok(message)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ServerFirstMessage<'a> {
|
|
||||||
nonce: &'a str,
|
|
||||||
salt: &'a str,
|
|
||||||
iteration_count: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
enum ServerFinalMessage<'a> {
|
|
||||||
Error(&'a str),
|
|
||||||
Verifier(&'a str),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod test {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn parse_server_first_message() {
|
|
||||||
let message = "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096";
|
|
||||||
let message = Parser::new(message).server_first_message().unwrap();
|
|
||||||
assert_eq!(message.nonce, "fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j");
|
|
||||||
assert_eq!(message.salt, "QSXCR+Q6sek8bf92");
|
|
||||||
assert_eq!(message.iteration_count, 4096);
|
|
||||||
}
|
|
||||||
|
|
||||||
// recorded auth exchange from psql
|
|
||||||
#[tokio::test]
|
|
||||||
async fn exchange() {
|
|
||||||
let password = "foobar";
|
|
||||||
let nonce = "9IZ2O01zb9IgiIZ1WJ/zgpJB";
|
|
||||||
|
|
||||||
let client_first = "n,,n=,r=9IZ2O01zb9IgiIZ1WJ/zgpJB";
|
|
||||||
let server_first =
|
|
||||||
"r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,s=fs3IXBy7U7+IvVjZ,i\
|
|
||||||
=4096";
|
|
||||||
let client_final =
|
|
||||||
"c=biws,r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,p=AmNKosjJzS3\
|
|
||||||
1NTlQYNs5BTeQjdHdk7lOflDo5re2an8=";
|
|
||||||
let server_final = "v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw=";
|
|
||||||
|
|
||||||
let mut scram = ScramSha256::new_inner(
|
|
||||||
Credentials::Password(normalize(password.as_bytes())),
|
|
||||||
ChannelBinding::unsupported(),
|
|
||||||
nonce.to_string(),
|
|
||||||
);
|
|
||||||
assert_eq!(str::from_utf8(scram.message()).unwrap(), client_first);
|
|
||||||
|
|
||||||
scram.update(server_first.as_bytes()).await.unwrap();
|
|
||||||
assert_eq!(str::from_utf8(scram.message()).unwrap(), client_final);
|
|
||||||
|
|
||||||
scram.finish(server_final.as_bytes()).unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,93 +0,0 @@
//! Provides functions for escaping literals and identifiers for use
//! in SQL queries.
//!
//! Prefer parameterized queries where possible. Do not escape
//! parameters in a parameterized query.

#[cfg(test)]
mod test;

/// Escape a literal and surround result with single quotes. Not
/// recommended in most cases.
///
/// If input contains backslashes, result will be of the form `
/// E'...'` so it is safe to use regardless of the setting of
/// standard_conforming_strings.
pub fn escape_literal(input: &str) -> String {
    escape_internal(input, false)
}

/// Escape an identifier and surround result with double quotes.
pub fn escape_identifier(input: &str) -> String {
    escape_internal(input, true)
}

// Translation of PostgreSQL libpq's PQescapeInternal(). Does not
// require a connection because input string is known to be valid
// UTF-8.
//
// Escape arbitrary strings. If as_ident is true, we escape the
// result as an identifier; if false, as a literal. The result is
// returned in a newly allocated buffer. If we fail due to an
// encoding violation or out of memory condition, we return NULL,
// storing an error message into conn.
fn escape_internal(input: &str, as_ident: bool) -> String {
    let mut num_backslashes = 0;
    let mut num_quotes = 0;
    let quote_char = if as_ident { '"' } else { '\'' };

    // Scan the string for characters that must be escaped.
    for ch in input.chars() {
        if ch == quote_char {
            num_quotes += 1;
        } else if ch == '\\' {
            num_backslashes += 1;
        }
    }

    // Allocate output String.
    let mut result_size = input.len() + num_quotes + 3; // two quotes, plus a NUL
    if !as_ident && num_backslashes > 0 {
        result_size += num_backslashes + 2;
    }

    let mut output = String::with_capacity(result_size);

    // If we are escaping a literal that contains backslashes, we use
    // the escape string syntax so that the result is correct under
    // either value of standard_conforming_strings. We also emit a
    // leading space in this case, to guard against the possibility
    // that the result might be interpolated immediately following an
    // identifier.
    if !as_ident && num_backslashes > 0 {
        output.push(' ');
        output.push('E');
    }

    // Opening quote.
    output.push(quote_char);

    // Use fast path if possible.
    //
    // We've already verified that the input string is well-formed in
    // the current encoding. If it contains no quotes and, in the
    // case of literal-escaping, no backslashes, then we can just copy
    // it directly to the output buffer, adding the necessary quotes.
    //
    // If not, we must rescan the input and process each character
    // individually.
    if num_quotes == 0 && (num_backslashes == 0 || as_ident) {
        output.push_str(input);
    } else {
        for ch in input.chars() {
            if ch == quote_char || (!as_ident && ch == '\\') {
                output.push(ch);
            }
            output.push(ch);
        }
    }

    output.push(quote_char);

    output
}
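A minimal usage sketch (not part of the diff above; the role and password values are illustrative) showing how the two escaping helpers are meant to be combined when a statement cannot be parameterized:

use crate::escape::{escape_identifier, escape_literal};

// Builds a one-off SQL string; prefer parameterized queries where possible.
fn alter_role_password_sql(role: &str, password: &str) -> String {
    format!(
        "ALTER ROLE {} PASSWORD {}",
        escape_identifier(role),
        escape_literal(password),
    )
}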
@@ -1,17 +0,0 @@
use crate::escape::{escape_identifier, escape_literal};

#[test]
fn test_escape_idenifier() {
    assert_eq!(escape_identifier("foo"), String::from("\"foo\""));
    assert_eq!(escape_identifier("f\\oo"), String::from("\"f\\oo\""));
    assert_eq!(escape_identifier("f'oo"), String::from("\"f'oo\""));
    assert_eq!(escape_identifier("f\"oo"), String::from("\"f\"\"oo\""));
}

#[test]
fn test_escape_literal() {
    assert_eq!(escape_literal("foo"), String::from("'foo'"));
    assert_eq!(escape_literal("f\\oo"), String::from(" E'f\\\\oo'"));
    assert_eq!(escape_literal("f'oo"), String::from("'f''oo'"));
    assert_eq!(escape_literal("f\"oo"), String::from("'f\"oo'"));
}
@@ -1,78 +0,0 @@
//! Low level Postgres protocol APIs.
//!
//! This crate implements the low level components of Postgres's communication
//! protocol, including message and value serialization and deserialization.
//! It is designed to be used as a building block by higher level APIs such as
//! `rust-postgres`, and should not typically be used directly.
//!
//! # Note
//!
//! This library assumes that the `client_encoding` backend parameter has been
//! set to `UTF8`. It will most likely not behave properly if that is not the case.
#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.6")]
#![warn(missing_docs, rust_2018_idioms, clippy::all)]

use byteorder::{BigEndian, ByteOrder};
use bytes::{BufMut, BytesMut};
use std::io;

pub mod authentication;
pub mod escape;
pub mod message;
pub mod password;
pub mod types;

/// A Postgres OID.
pub type Oid = u32;

/// A Postgres Log Sequence Number (LSN).
pub type Lsn = u64;

/// An enum indicating if a value is `NULL` or not.
pub enum IsNull {
    /// The value is `NULL`.
    Yes,
    /// The value is not `NULL`.
    No,
}

fn write_nullable<F, E>(serializer: F, buf: &mut BytesMut) -> Result<(), E>
where
    F: FnOnce(&mut BytesMut) -> Result<IsNull, E>,
    E: From<io::Error>,
{
    let base = buf.len();
    buf.put_i32(0);
    let size = match serializer(buf)? {
        IsNull::No => i32::from_usize(buf.len() - base - 4)?,
        IsNull::Yes => -1,
    };
    BigEndian::write_i32(&mut buf[base..], size);

    Ok(())
}

trait FromUsize: Sized {
    fn from_usize(x: usize) -> Result<Self, io::Error>;
}

macro_rules! from_usize {
    ($t:ty) => {
        impl FromUsize for $t {
            #[inline]
            fn from_usize(x: usize) -> io::Result<$t> {
                if x > <$t>::MAX as usize {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "value too large to transmit",
                    ))
                } else {
                    Ok(x as $t)
                }
            }
        }
    };
}

from_usize!(i16);
from_usize!(i32);
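As a hedged aside (not from the diff): `write_nullable` above implements the binary-protocol framing rule that every value is prefixed with a 4-byte big-endian length, with -1 standing in for NULL. A self-contained sketch of the same idea:

use byteorder::{BigEndian, ByteOrder};
use bytes::{BufMut, BytesMut};

// Writes a length-prefixed value: reserve 4 bytes, append the body,
// then backfill the prefix with the body length, or -1 for NULL.
// (Length overflow handling is omitted in this sketch.)
fn frame_value(value: Option<&[u8]>, buf: &mut BytesMut) {
    let base = buf.len();
    buf.put_i32(0); // length placeholder
    let len = match value {
        Some(v) => {
            buf.put_slice(v);
            v.len() as i32
        }
        None => -1,
    };
    BigEndian::write_i32(&mut buf[base..], len);
}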
@@ -1,766 +0,0 @@
|
|||||||
#![allow(missing_docs)]
|
|
||||||
|
|
||||||
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
|
|
||||||
use bytes::{Bytes, BytesMut};
|
|
||||||
use fallible_iterator::FallibleIterator;
|
|
||||||
use memchr::memchr;
|
|
||||||
use std::cmp;
|
|
||||||
use std::io::{self, Read};
|
|
||||||
use std::ops::Range;
|
|
||||||
use std::str;
|
|
||||||
|
|
||||||
use crate::Oid;
|
|
||||||
|
|
||||||
// top-level message tags
|
|
||||||
const PARSE_COMPLETE_TAG: u8 = b'1';
|
|
||||||
const BIND_COMPLETE_TAG: u8 = b'2';
|
|
||||||
const CLOSE_COMPLETE_TAG: u8 = b'3';
|
|
||||||
pub const NOTIFICATION_RESPONSE_TAG: u8 = b'A';
|
|
||||||
const COPY_DONE_TAG: u8 = b'c';
|
|
||||||
const COMMAND_COMPLETE_TAG: u8 = b'C';
|
|
||||||
const COPY_DATA_TAG: u8 = b'd';
|
|
||||||
const DATA_ROW_TAG: u8 = b'D';
|
|
||||||
const ERROR_RESPONSE_TAG: u8 = b'E';
|
|
||||||
const COPY_IN_RESPONSE_TAG: u8 = b'G';
|
|
||||||
const COPY_OUT_RESPONSE_TAG: u8 = b'H';
|
|
||||||
const COPY_BOTH_RESPONSE_TAG: u8 = b'W';
|
|
||||||
const EMPTY_QUERY_RESPONSE_TAG: u8 = b'I';
|
|
||||||
const BACKEND_KEY_DATA_TAG: u8 = b'K';
|
|
||||||
pub const NO_DATA_TAG: u8 = b'n';
|
|
||||||
pub const NOTICE_RESPONSE_TAG: u8 = b'N';
|
|
||||||
const AUTHENTICATION_TAG: u8 = b'R';
|
|
||||||
const PORTAL_SUSPENDED_TAG: u8 = b's';
|
|
||||||
pub const PARAMETER_STATUS_TAG: u8 = b'S';
|
|
||||||
const PARAMETER_DESCRIPTION_TAG: u8 = b't';
|
|
||||||
const ROW_DESCRIPTION_TAG: u8 = b'T';
|
|
||||||
pub const READY_FOR_QUERY_TAG: u8 = b'Z';
|
|
||||||
|
|
||||||
#[derive(Debug, Copy, Clone)]
|
|
||||||
pub struct Header {
|
|
||||||
tag: u8,
|
|
||||||
len: i32,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(clippy::len_without_is_empty)]
|
|
||||||
impl Header {
|
|
||||||
#[inline]
|
|
||||||
pub fn parse(buf: &[u8]) -> io::Result<Option<Header>> {
|
|
||||||
if buf.len() < 5 {
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
let tag = buf[0];
|
|
||||||
let len = BigEndian::read_i32(&buf[1..]);
|
|
||||||
|
|
||||||
if len < 4 {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidData,
|
|
||||||
"invalid message length: header length < 4",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Some(Header { tag, len }))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn tag(self) -> u8 {
|
|
||||||
self.tag
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn len(self) -> i32 {
|
|
||||||
self.len
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// An enum representing Postgres backend messages.
|
|
||||||
#[non_exhaustive]
|
|
||||||
pub enum Message {
|
|
||||||
AuthenticationCleartextPassword,
|
|
||||||
AuthenticationGss,
|
|
||||||
AuthenticationKerberosV5,
|
|
||||||
AuthenticationMd5Password,
|
|
||||||
AuthenticationOk,
|
|
||||||
AuthenticationScmCredential,
|
|
||||||
AuthenticationSspi,
|
|
||||||
AuthenticationGssContinue,
|
|
||||||
AuthenticationSasl(AuthenticationSaslBody),
|
|
||||||
AuthenticationSaslContinue(AuthenticationSaslContinueBody),
|
|
||||||
AuthenticationSaslFinal(AuthenticationSaslFinalBody),
|
|
||||||
BackendKeyData(BackendKeyDataBody),
|
|
||||||
BindComplete,
|
|
||||||
CloseComplete,
|
|
||||||
CommandComplete(CommandCompleteBody),
|
|
||||||
CopyData,
|
|
||||||
CopyDone,
|
|
||||||
CopyInResponse,
|
|
||||||
CopyOutResponse,
|
|
||||||
CopyBothResponse,
|
|
||||||
DataRow(DataRowBody),
|
|
||||||
EmptyQueryResponse,
|
|
||||||
ErrorResponse(ErrorResponseBody),
|
|
||||||
NoData,
|
|
||||||
NoticeResponse(NoticeResponseBody),
|
|
||||||
NotificationResponse(NotificationResponseBody),
|
|
||||||
ParameterDescription(ParameterDescriptionBody),
|
|
||||||
ParameterStatus(ParameterStatusBody),
|
|
||||||
ParseComplete,
|
|
||||||
PortalSuspended,
|
|
||||||
ReadyForQuery(ReadyForQueryBody),
|
|
||||||
RowDescription(RowDescriptionBody),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Message {
|
|
||||||
#[inline]
|
|
||||||
pub fn parse(buf: &mut BytesMut) -> io::Result<Option<Message>> {
|
|
||||||
if buf.len() < 5 {
|
|
||||||
let to_read = 5 - buf.len();
|
|
||||||
buf.reserve(to_read);
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
let tag = buf[0];
|
|
||||||
let len = (&buf[1..5]).read_u32::<BigEndian>().unwrap();
|
|
||||||
|
|
||||||
if len < 4 {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
"invalid message length: parsing u32",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let total_len = len as usize + 1;
|
|
||||||
if buf.len() < total_len {
|
|
||||||
let to_read = total_len - buf.len();
|
|
||||||
buf.reserve(to_read);
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut buf = Buffer {
|
|
||||||
bytes: buf.split_to(total_len).freeze(),
|
|
||||||
idx: 5,
|
|
||||||
};
|
|
||||||
|
|
||||||
let message = match tag {
|
|
||||||
PARSE_COMPLETE_TAG => Message::ParseComplete,
|
|
||||||
BIND_COMPLETE_TAG => Message::BindComplete,
|
|
||||||
CLOSE_COMPLETE_TAG => Message::CloseComplete,
|
|
||||||
NOTIFICATION_RESPONSE_TAG => {
|
|
||||||
let process_id = buf.read_i32::<BigEndian>()?;
|
|
||||||
let channel = buf.read_cstr()?;
|
|
||||||
let message = buf.read_cstr()?;
|
|
||||||
Message::NotificationResponse(NotificationResponseBody {
|
|
||||||
process_id,
|
|
||||||
channel,
|
|
||||||
message,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
COPY_DONE_TAG => Message::CopyDone,
|
|
||||||
COMMAND_COMPLETE_TAG => {
|
|
||||||
let tag = buf.read_cstr()?;
|
|
||||||
Message::CommandComplete(CommandCompleteBody { tag })
|
|
||||||
}
|
|
||||||
COPY_DATA_TAG => Message::CopyData,
|
|
||||||
DATA_ROW_TAG => {
|
|
||||||
let len = buf.read_u16::<BigEndian>()?;
|
|
||||||
let storage = buf.read_all();
|
|
||||||
Message::DataRow(DataRowBody { storage, len })
|
|
||||||
}
|
|
||||||
ERROR_RESPONSE_TAG => {
|
|
||||||
let storage = buf.read_all();
|
|
||||||
Message::ErrorResponse(ErrorResponseBody { storage })
|
|
||||||
}
|
|
||||||
COPY_IN_RESPONSE_TAG => Message::CopyInResponse,
|
|
||||||
COPY_OUT_RESPONSE_TAG => Message::CopyOutResponse,
|
|
||||||
COPY_BOTH_RESPONSE_TAG => Message::CopyBothResponse,
|
|
||||||
EMPTY_QUERY_RESPONSE_TAG => Message::EmptyQueryResponse,
|
|
||||||
BACKEND_KEY_DATA_TAG => {
|
|
||||||
let process_id = buf.read_i32::<BigEndian>()?;
|
|
||||||
let secret_key = buf.read_i32::<BigEndian>()?;
|
|
||||||
Message::BackendKeyData(BackendKeyDataBody {
|
|
||||||
process_id,
|
|
||||||
secret_key,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
NO_DATA_TAG => Message::NoData,
|
|
||||||
NOTICE_RESPONSE_TAG => {
|
|
||||||
let storage = buf.read_all();
|
|
||||||
Message::NoticeResponse(NoticeResponseBody { storage })
|
|
||||||
}
|
|
||||||
AUTHENTICATION_TAG => match buf.read_i32::<BigEndian>()? {
|
|
||||||
0 => Message::AuthenticationOk,
|
|
||||||
2 => Message::AuthenticationKerberosV5,
|
|
||||||
3 => Message::AuthenticationCleartextPassword,
|
|
||||||
5 => Message::AuthenticationMd5Password,
|
|
||||||
6 => Message::AuthenticationScmCredential,
|
|
||||||
7 => Message::AuthenticationGss,
|
|
||||||
8 => Message::AuthenticationGssContinue,
|
|
||||||
9 => Message::AuthenticationSspi,
|
|
||||||
10 => {
|
|
||||||
let storage = buf.read_all();
|
|
||||||
Message::AuthenticationSasl(AuthenticationSaslBody(storage))
|
|
||||||
}
|
|
||||||
11 => {
|
|
||||||
let storage = buf.read_all();
|
|
||||||
Message::AuthenticationSaslContinue(AuthenticationSaslContinueBody(storage))
|
|
||||||
}
|
|
||||||
12 => {
|
|
||||||
let storage = buf.read_all();
|
|
||||||
Message::AuthenticationSaslFinal(AuthenticationSaslFinalBody(storage))
|
|
||||||
}
|
|
||||||
tag => {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
format!("unknown authentication tag `{}`", tag),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
},
|
|
||||||
PORTAL_SUSPENDED_TAG => Message::PortalSuspended,
|
|
||||||
PARAMETER_STATUS_TAG => {
|
|
||||||
let name = buf.read_cstr()?;
|
|
||||||
let value = buf.read_cstr()?;
|
|
||||||
Message::ParameterStatus(ParameterStatusBody { name, value })
|
|
||||||
}
|
|
||||||
PARAMETER_DESCRIPTION_TAG => {
|
|
||||||
let len = buf.read_u16::<BigEndian>()?;
|
|
||||||
let storage = buf.read_all();
|
|
||||||
Message::ParameterDescription(ParameterDescriptionBody { storage, len })
|
|
||||||
}
|
|
||||||
ROW_DESCRIPTION_TAG => {
|
|
||||||
let len = buf.read_u16::<BigEndian>()?;
|
|
||||||
let storage = buf.read_all();
|
|
||||||
Message::RowDescription(RowDescriptionBody { storage, len })
|
|
||||||
}
|
|
||||||
READY_FOR_QUERY_TAG => {
|
|
||||||
let status = buf.read_u8()?;
|
|
||||||
Message::ReadyForQuery(ReadyForQueryBody { status })
|
|
||||||
}
|
|
||||||
tag => {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
format!("unknown message tag `{}`", tag),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if !buf.is_empty() {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
"invalid message length: expected buffer to be empty",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Some(message))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct Buffer {
|
|
||||||
bytes: Bytes,
|
|
||||||
idx: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Buffer {
|
|
||||||
#[inline]
|
|
||||||
fn slice(&self) -> &[u8] {
|
|
||||||
&self.bytes[self.idx..]
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn is_empty(&self) -> bool {
|
|
||||||
self.slice().is_empty()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn read_cstr(&mut self) -> io::Result<Bytes> {
|
|
||||||
match memchr(0, self.slice()) {
|
|
||||||
Some(pos) => {
|
|
||||||
let start = self.idx;
|
|
||||||
let end = start + pos;
|
|
||||||
let cstr = self.bytes.slice(start..end);
|
|
||||||
self.idx = end + 1;
|
|
||||||
Ok(cstr)
|
|
||||||
}
|
|
||||||
None => Err(io::Error::new(
|
|
||||||
io::ErrorKind::UnexpectedEof,
|
|
||||||
"unexpected EOF",
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn read_all(&mut self) -> Bytes {
|
|
||||||
let buf = self.bytes.slice(self.idx..);
|
|
||||||
self.idx = self.bytes.len();
|
|
||||||
buf
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Read for Buffer {
|
|
||||||
#[inline]
|
|
||||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
|
||||||
let len = {
|
|
||||||
let slice = self.slice();
|
|
||||||
let len = cmp::min(slice.len(), buf.len());
|
|
||||||
buf[..len].copy_from_slice(&slice[..len]);
|
|
||||||
len
|
|
||||||
};
|
|
||||||
self.idx += len;
|
|
||||||
Ok(len)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct AuthenticationMd5PasswordBody {
|
|
||||||
salt: [u8; 4],
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AuthenticationMd5PasswordBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn salt(&self) -> [u8; 4] {
|
|
||||||
self.salt
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct AuthenticationSaslBody(Bytes);
|
|
||||||
|
|
||||||
impl AuthenticationSaslBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn mechanisms(&self) -> SaslMechanisms<'_> {
|
|
||||||
SaslMechanisms(&self.0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct SaslMechanisms<'a>(&'a [u8]);
|
|
||||||
|
|
||||||
impl<'a> FallibleIterator for SaslMechanisms<'a> {
|
|
||||||
type Item = &'a str;
|
|
||||||
type Error = io::Error;
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn next(&mut self) -> io::Result<Option<&'a str>> {
|
|
||||||
let value_end = find_null(self.0, 0)?;
|
|
||||||
if value_end == 0 {
|
|
||||||
if self.0.len() != 1 {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidData,
|
|
||||||
"invalid message length: expected to be at end of iterator for sasl",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
Ok(None)
|
|
||||||
} else {
|
|
||||||
let value = get_str(&self.0[..value_end])?;
|
|
||||||
self.0 = &self.0[value_end + 1..];
|
|
||||||
Ok(Some(value))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct AuthenticationSaslContinueBody(Bytes);
|
|
||||||
|
|
||||||
impl AuthenticationSaslContinueBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn data(&self) -> &[u8] {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct AuthenticationSaslFinalBody(Bytes);
|
|
||||||
|
|
||||||
impl AuthenticationSaslFinalBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn data(&self) -> &[u8] {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct BackendKeyDataBody {
|
|
||||||
process_id: i32,
|
|
||||||
secret_key: i32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BackendKeyDataBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn process_id(&self) -> i32 {
|
|
||||||
self.process_id
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn secret_key(&self) -> i32 {
|
|
||||||
self.secret_key
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct CommandCompleteBody {
|
|
||||||
tag: Bytes,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CommandCompleteBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn tag(&self) -> io::Result<&str> {
|
|
||||||
get_str(&self.tag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct DataRowBody {
|
|
||||||
storage: Bytes,
|
|
||||||
len: u16,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DataRowBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn ranges(&self) -> DataRowRanges<'_> {
|
|
||||||
DataRowRanges {
|
|
||||||
buf: &self.storage,
|
|
||||||
len: self.storage.len(),
|
|
||||||
remaining: self.len,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn buffer(&self) -> &[u8] {
|
|
||||||
&self.storage
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct DataRowRanges<'a> {
|
|
||||||
buf: &'a [u8],
|
|
||||||
len: usize,
|
|
||||||
remaining: u16,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FallibleIterator for DataRowRanges<'_> {
|
|
||||||
type Item = Option<Range<usize>>;
|
|
||||||
type Error = io::Error;
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn next(&mut self) -> io::Result<Option<Option<Range<usize>>>> {
|
|
||||||
if self.remaining == 0 {
|
|
||||||
if self.buf.is_empty() {
|
|
||||||
return Ok(None);
|
|
||||||
} else {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
"invalid message length: datarowrange is not empty",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
self.remaining -= 1;
|
|
||||||
let len = self.buf.read_i32::<BigEndian>()?;
|
|
||||||
if len < 0 {
|
|
||||||
Ok(Some(None))
|
|
||||||
} else {
|
|
||||||
let len = len as usize;
|
|
||||||
if self.buf.len() < len {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::UnexpectedEof,
|
|
||||||
"unexpected EOF",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let base = self.len - self.buf.len();
|
|
||||||
self.buf = &self.buf[len..];
|
|
||||||
Ok(Some(Some(base..base + len)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
||||||
let len = self.remaining as usize;
|
|
||||||
(len, Some(len))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct ErrorResponseBody {
|
|
||||||
storage: Bytes,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ErrorResponseBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn fields(&self) -> ErrorFields<'_> {
|
|
||||||
ErrorFields { buf: &self.storage }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct ErrorFields<'a> {
|
|
||||||
buf: &'a [u8],
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> FallibleIterator for ErrorFields<'a> {
|
|
||||||
type Item = ErrorField<'a>;
|
|
||||||
type Error = io::Error;
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn next(&mut self) -> io::Result<Option<ErrorField<'a>>> {
|
|
||||||
let type_ = self.buf.read_u8()?;
|
|
||||||
if type_ == 0 {
|
|
||||||
if self.buf.is_empty() {
|
|
||||||
return Ok(None);
|
|
||||||
} else {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
"invalid message length: error fields is not drained",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let value_end = find_null(self.buf, 0)?;
|
|
||||||
let value = get_str(&self.buf[..value_end])?;
|
|
||||||
self.buf = &self.buf[value_end + 1..];
|
|
||||||
|
|
||||||
Ok(Some(ErrorField { type_, value }))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct ErrorField<'a> {
|
|
||||||
type_: u8,
|
|
||||||
value: &'a str,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ErrorField<'_> {
|
|
||||||
#[inline]
|
|
||||||
pub fn type_(&self) -> u8 {
|
|
||||||
self.type_
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn value(&self) -> &str {
|
|
||||||
self.value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct NoticeResponseBody {
|
|
||||||
storage: Bytes,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NoticeResponseBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn fields(&self) -> ErrorFields<'_> {
|
|
||||||
ErrorFields { buf: &self.storage }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn as_bytes(&self) -> &[u8] {
|
|
||||||
&self.storage
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct NotificationResponseBody {
|
|
||||||
process_id: i32,
|
|
||||||
channel: Bytes,
|
|
||||||
message: Bytes,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NotificationResponseBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn process_id(&self) -> i32 {
|
|
||||||
self.process_id
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn channel(&self) -> io::Result<&str> {
|
|
||||||
get_str(&self.channel)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn message(&self) -> io::Result<&str> {
|
|
||||||
get_str(&self.message)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct ParameterDescriptionBody {
|
|
||||||
storage: Bytes,
|
|
||||||
len: u16,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ParameterDescriptionBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn parameters(&self) -> Parameters<'_> {
|
|
||||||
Parameters {
|
|
||||||
buf: &self.storage,
|
|
||||||
remaining: self.len,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct Parameters<'a> {
|
|
||||||
buf: &'a [u8],
|
|
||||||
remaining: u16,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FallibleIterator for Parameters<'_> {
|
|
||||||
type Item = Oid;
|
|
||||||
type Error = io::Error;
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn next(&mut self) -> io::Result<Option<Oid>> {
|
|
||||||
if self.remaining == 0 {
|
|
||||||
if self.buf.is_empty() {
|
|
||||||
return Ok(None);
|
|
||||||
} else {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
"invalid message length: parameters is not drained",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
self.remaining -= 1;
|
|
||||||
self.buf.read_u32::<BigEndian>().map(Some)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
||||||
let len = self.remaining as usize;
|
|
||||||
(len, Some(len))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct ParameterStatusBody {
|
|
||||||
name: Bytes,
|
|
||||||
value: Bytes,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ParameterStatusBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn name(&self) -> io::Result<&str> {
|
|
||||||
get_str(&self.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn value(&self) -> io::Result<&str> {
|
|
||||||
get_str(&self.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct ReadyForQueryBody {
|
|
||||||
status: u8,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ReadyForQueryBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn status(&self) -> u8 {
|
|
||||||
self.status
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct RowDescriptionBody {
|
|
||||||
storage: Bytes,
|
|
||||||
len: u16,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl RowDescriptionBody {
|
|
||||||
#[inline]
|
|
||||||
pub fn fields(&self) -> Fields<'_> {
|
|
||||||
Fields {
|
|
||||||
buf: &self.storage,
|
|
||||||
remaining: self.len,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct Fields<'a> {
|
|
||||||
buf: &'a [u8],
|
|
||||||
remaining: u16,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> FallibleIterator for Fields<'a> {
|
|
||||||
type Item = Field<'a>;
|
|
||||||
type Error = io::Error;
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn next(&mut self) -> io::Result<Option<Field<'a>>> {
|
|
||||||
if self.remaining == 0 {
|
|
||||||
if self.buf.is_empty() {
|
|
||||||
return Ok(None);
|
|
||||||
} else {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
"invalid message length: field is not drained",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
self.remaining -= 1;
|
|
||||||
let name_end = find_null(self.buf, 0)?;
|
|
||||||
let name = get_str(&self.buf[..name_end])?;
|
|
||||||
self.buf = &self.buf[name_end + 1..];
|
|
||||||
let table_oid = self.buf.read_u32::<BigEndian>()?;
|
|
||||||
let column_id = self.buf.read_i16::<BigEndian>()?;
|
|
||||||
let type_oid = self.buf.read_u32::<BigEndian>()?;
|
|
||||||
let type_size = self.buf.read_i16::<BigEndian>()?;
|
|
||||||
let type_modifier = self.buf.read_i32::<BigEndian>()?;
|
|
||||||
let format = self.buf.read_i16::<BigEndian>()?;
|
|
||||||
|
|
||||||
Ok(Some(Field {
|
|
||||||
name,
|
|
||||||
table_oid,
|
|
||||||
column_id,
|
|
||||||
type_oid,
|
|
||||||
type_size,
|
|
||||||
type_modifier,
|
|
||||||
format,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct Field<'a> {
|
|
||||||
name: &'a str,
|
|
||||||
table_oid: Oid,
|
|
||||||
column_id: i16,
|
|
||||||
type_oid: Oid,
|
|
||||||
type_size: i16,
|
|
||||||
type_modifier: i32,
|
|
||||||
format: i16,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> Field<'a> {
|
|
||||||
#[inline]
|
|
||||||
pub fn name(&self) -> &'a str {
|
|
||||||
self.name
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn table_oid(&self) -> Oid {
|
|
||||||
self.table_oid
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn column_id(&self) -> i16 {
|
|
||||||
self.column_id
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn type_oid(&self) -> Oid {
|
|
||||||
self.type_oid
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn type_size(&self) -> i16 {
|
|
||||||
self.type_size
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn type_modifier(&self) -> i32 {
|
|
||||||
self.type_modifier
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn format(&self) -> i16 {
|
|
||||||
self.format
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn find_null(buf: &[u8], start: usize) -> io::Result<usize> {
|
|
||||||
match memchr(0, &buf[start..]) {
|
|
||||||
Some(pos) => Ok(pos + start),
|
|
||||||
None => Err(io::Error::new(
|
|
||||||
io::ErrorKind::UnexpectedEof,
|
|
||||||
"unexpected EOF",
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn get_str(buf: &[u8]) -> io::Result<&str> {
|
|
||||||
str::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))
|
|
||||||
}
|
|
||||||
@@ -1,309 +0,0 @@
|
|||||||
//! Frontend message serialization.
|
|
||||||
#![allow(missing_docs)]
|
|
||||||
|
|
||||||
use byteorder::{BigEndian, ByteOrder};
|
|
||||||
use bytes::{Buf, BufMut, BytesMut};
|
|
||||||
use std::convert::TryFrom;
|
|
||||||
use std::error::Error;
|
|
||||||
use std::io;
|
|
||||||
use std::marker;
|
|
||||||
|
|
||||||
use crate::{write_nullable, FromUsize, IsNull, Oid};
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn write_body<F, E>(buf: &mut BytesMut, f: F) -> Result<(), E>
|
|
||||||
where
|
|
||||||
F: FnOnce(&mut BytesMut) -> Result<(), E>,
|
|
||||||
E: From<io::Error>,
|
|
||||||
{
|
|
||||||
let base = buf.len();
|
|
||||||
buf.extend_from_slice(&[0; 4]);
|
|
||||||
|
|
||||||
f(buf)?;
|
|
||||||
|
|
||||||
let size = i32::from_usize(buf.len() - base)?;
|
|
||||||
BigEndian::write_i32(&mut buf[base..], size);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub enum BindError {
|
|
||||||
Conversion(Box<dyn Error + marker::Sync + Send>),
|
|
||||||
Serialization(io::Error),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<Box<dyn Error + marker::Sync + Send>> for BindError {
|
|
||||||
#[inline]
|
|
||||||
fn from(e: Box<dyn Error + marker::Sync + Send>) -> BindError {
|
|
||||||
BindError::Conversion(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<io::Error> for BindError {
|
|
||||||
#[inline]
|
|
||||||
fn from(e: io::Error) -> BindError {
|
|
||||||
BindError::Serialization(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn bind<I, J, F, T, K>(
|
|
||||||
portal: &str,
|
|
||||||
statement: &str,
|
|
||||||
formats: I,
|
|
||||||
values: J,
|
|
||||||
mut serializer: F,
|
|
||||||
result_formats: K,
|
|
||||||
buf: &mut BytesMut,
|
|
||||||
) -> Result<(), BindError>
|
|
||||||
where
|
|
||||||
I: IntoIterator<Item = i16>,
|
|
||||||
J: IntoIterator<Item = T>,
|
|
||||||
F: FnMut(T, &mut BytesMut) -> Result<IsNull, Box<dyn Error + marker::Sync + Send>>,
|
|
||||||
K: IntoIterator<Item = i16>,
|
|
||||||
{
|
|
||||||
buf.put_u8(b'B');
|
|
||||||
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
write_cstr(portal.as_bytes(), buf)?;
|
|
||||||
write_cstr(statement.as_bytes(), buf)?;
|
|
||||||
write_counted(
|
|
||||||
formats,
|
|
||||||
|f, buf| {
|
|
||||||
buf.put_i16(f);
|
|
||||||
Ok::<_, io::Error>(())
|
|
||||||
},
|
|
||||||
buf,
|
|
||||||
)?;
|
|
||||||
write_counted(
|
|
||||||
values,
|
|
||||||
|v, buf| write_nullable(|buf| serializer(v, buf), buf),
|
|
||||||
buf,
|
|
||||||
)?;
|
|
||||||
write_counted(
|
|
||||||
result_formats,
|
|
||||||
|f, buf| {
|
|
||||||
buf.put_i16(f);
|
|
||||||
Ok::<_, io::Error>(())
|
|
||||||
},
|
|
||||||
buf,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn write_counted<I, T, F, E>(items: I, mut serializer: F, buf: &mut BytesMut) -> Result<(), E>
|
|
||||||
where
|
|
||||||
I: IntoIterator<Item = T>,
|
|
||||||
F: FnMut(T, &mut BytesMut) -> Result<(), E>,
|
|
||||||
E: From<io::Error>,
|
|
||||||
{
|
|
||||||
let base = buf.len();
|
|
||||||
buf.extend_from_slice(&[0; 2]);
|
|
||||||
let mut count = 0;
|
|
||||||
for item in items {
|
|
||||||
serializer(item, buf)?;
|
|
||||||
count += 1;
|
|
||||||
}
|
|
||||||
let count = i16::from_usize(count)?;
|
|
||||||
BigEndian::write_i16(&mut buf[base..], count);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn cancel_request(process_id: i32, secret_key: i32, buf: &mut BytesMut) {
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
buf.put_i32(80_877_102);
|
|
||||||
buf.put_i32(process_id);
|
|
||||||
buf.put_i32(secret_key);
|
|
||||||
Ok::<_, io::Error>(())
|
|
||||||
})
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn close(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> {
|
|
||||||
buf.put_u8(b'C');
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
buf.put_u8(variant);
|
|
||||||
write_cstr(name.as_bytes(), buf)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct CopyData<T> {
|
|
||||||
buf: T,
|
|
||||||
len: i32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> CopyData<T>
|
|
||||||
where
|
|
||||||
T: Buf,
|
|
||||||
{
|
|
||||||
pub fn new(buf: T) -> io::Result<CopyData<T>> {
|
|
||||||
let len = buf
|
|
||||||
.remaining()
|
|
||||||
.checked_add(4)
|
|
||||||
.and_then(|l| i32::try_from(l).ok())
|
|
||||||
.ok_or_else(|| {
|
|
||||||
io::Error::new(io::ErrorKind::InvalidInput, "message length overflow")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(CopyData { buf, len })
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn write(self, out: &mut BytesMut) {
|
|
||||||
out.put_u8(b'd');
|
|
||||||
out.put_i32(self.len);
|
|
||||||
out.put(self.buf);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn copy_done(buf: &mut BytesMut) {
|
|
||||||
buf.put_u8(b'c');
|
|
||||||
write_body(buf, |_| Ok::<(), io::Error>(())).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn copy_fail(message: &str, buf: &mut BytesMut) -> io::Result<()> {
|
|
||||||
buf.put_u8(b'f');
|
|
||||||
write_body(buf, |buf| write_cstr(message.as_bytes(), buf))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn describe(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> {
|
|
||||||
buf.put_u8(b'D');
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
buf.put_u8(variant);
|
|
||||||
write_cstr(name.as_bytes(), buf)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn execute(portal: &str, max_rows: i32, buf: &mut BytesMut) -> io::Result<()> {
|
|
||||||
buf.put_u8(b'E');
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
write_cstr(portal.as_bytes(), buf)?;
|
|
||||||
buf.put_i32(max_rows);
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn parse<I>(name: &str, query: &str, param_types: I, buf: &mut BytesMut) -> io::Result<()>
|
|
||||||
where
|
|
||||||
I: IntoIterator<Item = Oid>,
|
|
||||||
{
|
|
||||||
buf.put_u8(b'P');
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
write_cstr(name.as_bytes(), buf)?;
|
|
||||||
write_cstr(query.as_bytes(), buf)?;
|
|
||||||
write_counted(
|
|
||||||
param_types,
|
|
||||||
|t, buf| {
|
|
||||||
buf.put_u32(t);
|
|
||||||
Ok::<_, io::Error>(())
|
|
||||||
},
|
|
||||||
buf,
|
|
||||||
)?;
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn password_message(password: &[u8], buf: &mut BytesMut) -> io::Result<()> {
|
|
||||||
buf.put_u8(b'p');
|
|
||||||
write_body(buf, |buf| write_cstr(password, buf))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn query(query: &str, buf: &mut BytesMut) -> io::Result<()> {
|
|
||||||
buf.put_u8(b'Q');
|
|
||||||
write_body(buf, |buf| write_cstr(query.as_bytes(), buf))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn sasl_initial_response(mechanism: &str, data: &[u8], buf: &mut BytesMut) -> io::Result<()> {
|
|
||||||
buf.put_u8(b'p');
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
write_cstr(mechanism.as_bytes(), buf)?;
|
|
||||||
let len = i32::from_usize(data.len())?;
|
|
||||||
buf.put_i32(len);
|
|
||||||
buf.put_slice(data);
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn sasl_response(data: &[u8], buf: &mut BytesMut) -> io::Result<()> {
|
|
||||||
buf.put_u8(b'p');
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
buf.put_slice(data);
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn ssl_request(buf: &mut BytesMut) {
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
buf.put_i32(80_877_103);
|
|
||||||
Ok::<_, io::Error>(())
|
|
||||||
})
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn startup_message(parameters: &StartupMessageParams, buf: &mut BytesMut) -> io::Result<()> {
|
|
||||||
write_body(buf, |buf| {
|
|
||||||
// postgres protocol version 3.0(196608) in bigger-endian
|
|
||||||
buf.put_i32(0x00_03_00_00);
|
|
||||||
buf.put_slice(¶meters.params);
|
|
||||||
buf.put_u8(0);
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Default, PartialEq, Eq)]
|
|
||||||
pub struct StartupMessageParams {
|
|
||||||
pub params: BytesMut,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StartupMessageParams {
|
|
||||||
/// Set parameter's value by its name.
|
|
||||||
pub fn insert(&mut self, name: &str, value: &str) {
|
|
||||||
if name.contains('\0') || value.contains('\0') {
|
|
||||||
panic!("startup parameter name or value contained a null")
|
|
||||||
}
|
|
||||||
self.params.put_slice(name.as_bytes());
|
|
||||||
self.params.put_u8(0);
|
|
||||||
self.params.put_slice(value.as_bytes());
|
|
||||||
self.params.put_u8(0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn sync(buf: &mut BytesMut) {
|
|
||||||
buf.put_u8(b'S');
|
|
||||||
write_body(buf, |_| Ok::<(), io::Error>(())).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub fn terminate(buf: &mut BytesMut) {
|
|
||||||
buf.put_u8(b'X');
|
|
||||||
write_body(buf, |_| Ok::<(), io::Error>(())).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn write_cstr(s: &[u8], buf: &mut BytesMut) -> Result<(), io::Error> {
|
|
||||||
if s.contains(&0) {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
"string contains embedded null",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
buf.put_slice(s);
|
|
||||||
buf.put_u8(0);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
@@ -1,8 +0,0 @@
//! Postgres message protocol support.
//!
//! See [Postgres's documentation][docs] for more information on message flow.
//!
//! [docs]: https://www.postgresql.org/docs/9.5/static/protocol-flow.html

pub mod backend;
pub mod frontend;
@@ -1,89 +0,0 @@
//! Functions to encrypt a password in the client.
//!
//! This is intended to be used by client applications that wish to
//! send commands like `ALTER USER joe PASSWORD 'pwd'`. The password
//! need not be sent in cleartext if it is encrypted on the client
//! side. This is good because it ensures the cleartext password won't
//! end up in logs pg_stat displays, etc.

use crate::authentication::sasl;
use hmac::{Hmac, Mac};
use rand::RngCore;
use sha2::digest::FixedOutput;
use sha2::{Digest, Sha256};

#[cfg(test)]
mod test;

const SCRAM_DEFAULT_ITERATIONS: u32 = 4096;
const SCRAM_DEFAULT_SALT_LEN: usize = 16;

/// Hash password using SCRAM-SHA-256 with a randomly-generated
/// salt.
///
/// The client may assume the returned string doesn't contain any
/// special characters that would require escaping in an SQL command.
pub async fn scram_sha_256(password: &[u8]) -> String {
    let mut salt: [u8; SCRAM_DEFAULT_SALT_LEN] = [0; SCRAM_DEFAULT_SALT_LEN];
    let mut rng = rand::thread_rng();
    rng.fill_bytes(&mut salt);
    scram_sha_256_salt(password, salt).await
}

// Internal implementation of scram_sha_256 with a caller-provided
// salt. This is useful for testing.
pub(crate) async fn scram_sha_256_salt(
    password: &[u8],
    salt: [u8; SCRAM_DEFAULT_SALT_LEN],
) -> String {
    // Prepare the password, per [RFC
    // 4013](https://tools.ietf.org/html/rfc4013), if possible.
    //
    // Postgres treats passwords as byte strings (without embedded NUL
    // bytes), but SASL expects passwords to be valid UTF-8.
    //
    // Follow the behavior of libpq's PQencryptPasswordConn(), and
    // also the backend. If the password is not valid UTF-8, or if it
    // contains prohibited characters (such as non-ASCII whitespace),
    // just skip the SASLprep step and use the original byte
    // sequence.
    let prepared: Vec<u8> = match std::str::from_utf8(password) {
        Ok(password_str) => {
            match stringprep::saslprep(password_str) {
                Ok(p) => p.into_owned().into_bytes(),
                // contains invalid characters; skip saslprep
                Err(_) => Vec::from(password),
            }
        }
        // not valid UTF-8; skip saslprep
        Err(_) => Vec::from(password),
    };

    // salt password
    let salted_password = sasl::hi(&prepared, &salt, SCRAM_DEFAULT_ITERATIONS).await;

    // client key
    let mut hmac = Hmac::<Sha256>::new_from_slice(&salted_password)
        .expect("HMAC is able to accept all key sizes");
    hmac.update(b"Client Key");
    let client_key = hmac.finalize().into_bytes();

    // stored key
    let mut hash = Sha256::default();
    hash.update(client_key.as_slice());
    let stored_key = hash.finalize_fixed();

    // server key
    let mut hmac = Hmac::<Sha256>::new_from_slice(&salted_password)
        .expect("HMAC is able to accept all key sizes");
    hmac.update(b"Server Key");
    let server_key = hmac.finalize().into_bytes();

    format!(
        "SCRAM-SHA-256${}:{}${}:{}",
        SCRAM_DEFAULT_ITERATIONS,
        base64::encode(salt),
        base64::encode(stored_key),
        base64::encode(server_key)
    )
}
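A hedged usage sketch (the role name handling and the query-execution step are placeholders, not part of this diff): the verifier returned by `scram_sha_256` is meant to be spliced into an ALTER ROLE statement so the cleartext password never leaves the client.

use crate::password::scram_sha_256;

// Produces the SQL to set a role's password to a pre-hashed SCRAM verifier.
// Per the docs above, the verifier contains no characters that need escaping;
// the role name here is assumed to be already validated or escaped elsewhere.
async fn set_password_sql(role: &str, password: &[u8]) -> String {
    let verifier = scram_sha_256(password).await;
    format!("ALTER ROLE {} PASSWORD '{}'", role, verifier)
}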
@@ -1,11 +0,0 @@
use crate::password;

#[tokio::test]
async fn test_encrypt_scram_sha_256() {
    // Specify the salt to make the test deterministic. Any bytes will do.
    let salt: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
    assert_eq!(
        password::scram_sha_256_salt(b"secret", salt).await,
        "SCRAM-SHA-256$4096:AQIDBAUGBwgJCgsMDQ4PEA==$8rrDg00OqaiWXJ7p+sCgHEIaBSHY89ZJl3mfIsf32oY=:05L1f+yZbiN8O0AnO40Og85NNRhvzTS57naKRWCcsIA="
    );
}
@@ -1,294 +0,0 @@
|
|||||||
//! Conversions to and from Postgres's binary format for various types.
|
|
||||||
use byteorder::{BigEndian, ReadBytesExt};
|
|
||||||
use bytes::{BufMut, BytesMut};
|
|
||||||
use fallible_iterator::FallibleIterator;
|
|
||||||
use std::boxed::Box as StdBox;
|
|
||||||
use std::error::Error;
|
|
||||||
use std::str;
|
|
||||||
|
|
||||||
use crate::Oid;
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod test;
|
|
||||||
|
|
||||||
/// Serializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value.
|
|
||||||
#[inline]
|
|
||||||
pub fn text_to_sql(v: &str, buf: &mut BytesMut) {
|
|
||||||
buf.put_slice(v.as_bytes());
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Deserializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value.
|
|
||||||
#[inline]
|
|
||||||
pub fn text_from_sql(buf: &[u8]) -> Result<&str, StdBox<dyn Error + Sync + Send>> {
|
|
||||||
Ok(str::from_utf8(buf)?)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Deserializes a `"char"` value.
|
|
||||||
#[inline]
|
|
||||||
pub fn char_from_sql(mut buf: &[u8]) -> Result<i8, StdBox<dyn Error + Sync + Send>> {
|
|
||||||
let v = buf.read_i8()?;
|
|
||||||
if !buf.is_empty() {
|
|
||||||
return Err("invalid buffer size".into());
|
|
||||||
}
|
|
||||||
Ok(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Serializes an `OID` value.
|
|
||||||
#[inline]
|
|
||||||
pub fn oid_to_sql(v: Oid, buf: &mut BytesMut) {
|
|
||||||
buf.put_u32(v);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Deserializes an `OID` value.
|
|
||||||
#[inline]
|
|
||||||
pub fn oid_from_sql(mut buf: &[u8]) -> Result<Oid, StdBox<dyn Error + Sync + Send>> {
|
|
||||||
let v = buf.read_u32::<BigEndian>()?;
|
|
||||||
if !buf.is_empty() {
|
|
||||||
return Err("invalid buffer size".into());
|
|
||||||
}
|
|
||||||
Ok(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A fallible iterator over `HSTORE` entries.
|
|
||||||
pub struct HstoreEntries<'a> {
|
|
||||||
remaining: i32,
|
|
||||||
buf: &'a [u8],
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> FallibleIterator for HstoreEntries<'a> {
|
|
||||||
type Item = (&'a str, Option<&'a str>);
|
|
||||||
type Error = StdBox<dyn Error + Sync + Send>;
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
#[allow(clippy::type_complexity)]
|
|
||||||
fn next(
|
|
||||||
&mut self,
|
|
||||||
) -> Result<Option<(&'a str, Option<&'a str>)>, StdBox<dyn Error + Sync + Send>> {
|
|
||||||
if self.remaining == 0 {
|
|
||||||
if !self.buf.is_empty() {
|
|
||||||
return Err("invalid buffer size".into());
|
|
||||||
}
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
self.remaining -= 1;
|
|
||||||
|
|
||||||
let key_len = self.buf.read_i32::<BigEndian>()?;
|
|
||||||
if key_len < 0 {
|
|
||||||
return Err("invalid key length".into());
|
|
||||||
}
|
|
||||||
let (key, buf) = self.buf.split_at(key_len as usize);
|
|
||||||
let key = str::from_utf8(key)?;
|
|
||||||
self.buf = buf;
|
|
||||||
|
|
||||||
let value_len = self.buf.read_i32::<BigEndian>()?;
|
|
||||||
let value = if value_len < 0 {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
let (value, buf) = self.buf.split_at(value_len as usize);
|
|
||||||
let value = str::from_utf8(value)?;
|
|
||||||
self.buf = buf;
|
|
||||||
Some(value)
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Some((key, value)))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
||||||
let len = self.remaining as usize;
|
|
||||||
(len, Some(len))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Deserializes an array value.
|
|
||||||
#[inline]
|
|
||||||
pub fn array_from_sql(mut buf: &[u8]) -> Result<Array<'_>, StdBox<dyn Error + Sync + Send>> {
|
|
||||||
let dimensions = buf.read_i32::<BigEndian>()?;
|
|
||||||
if dimensions < 0 {
|
|
||||||
return Err("invalid dimension count".into());
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut r = buf;
|
|
||||||
let mut elements = 1i32;
|
|
||||||
for _ in 0..dimensions {
|
|
||||||
let len = r.read_i32::<BigEndian>()?;
|
|
||||||
if len < 0 {
|
|
||||||
return Err("invalid dimension size".into());
|
|
||||||
}
|
|
||||||
let _lower_bound = r.read_i32::<BigEndian>()?;
|
|
||||||
elements = match elements.checked_mul(len) {
|
|
||||||
Some(elements) => elements,
|
|
||||||
None => return Err("too many array elements".into()),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
if dimensions == 0 {
|
|
||||||
elements = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Array {
|
|
||||||
dimensions,
|
|
||||||
elements,
|
|
||||||
buf,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A Postgres array.
|
|
||||||
pub struct Array<'a> {
|
|
||||||
dimensions: i32,
|
|
||||||
elements: i32,
|
|
||||||
buf: &'a [u8],
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> Array<'a> {
|
|
||||||
/// Returns an iterator over the dimensions of the array.
|
|
||||||
#[inline]
|
|
||||||
pub fn dimensions(&self) -> ArrayDimensions<'a> {
|
|
||||||
ArrayDimensions(&self.buf[..self.dimensions as usize * 8])
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns an iterator over the values of the array.
|
|
||||||
#[inline]
|
|
||||||
pub fn values(&self) -> ArrayValues<'a> {
|
|
||||||
ArrayValues {
|
|
||||||
remaining: self.elements,
|
|
||||||
buf: &self.buf[self.dimensions as usize * 8..],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}

/// An iterator over the dimensions of an array.
pub struct ArrayDimensions<'a>(&'a [u8]);

impl FallibleIterator for ArrayDimensions<'_> {
    type Item = ArrayDimension;
    type Error = StdBox<dyn Error + Sync + Send>;

    #[inline]
    fn next(&mut self) -> Result<Option<ArrayDimension>, StdBox<dyn Error + Sync + Send>> {
        if self.0.is_empty() {
            return Ok(None);
        }

        let len = self.0.read_i32::<BigEndian>()?;
        let lower_bound = self.0.read_i32::<BigEndian>()?;

        Ok(Some(ArrayDimension { len, lower_bound }))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.0.len() / 8;
        (len, Some(len))
    }
}

/// Information about a dimension of an array.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct ArrayDimension {
    /// The length of this dimension.
    pub len: i32,

    /// The base value used to index into this dimension.
    pub lower_bound: i32,
}

/// An iterator over the values of an array, in row-major order.
pub struct ArrayValues<'a> {
    remaining: i32,
    buf: &'a [u8],
}

impl<'a> FallibleIterator for ArrayValues<'a> {
    type Item = Option<&'a [u8]>;
    type Error = StdBox<dyn Error + Sync + Send>;

    #[inline]
    fn next(&mut self) -> Result<Option<Option<&'a [u8]>>, StdBox<dyn Error + Sync + Send>> {
        if self.remaining == 0 {
            if !self.buf.is_empty() {
                return Err("invalid message length: arrayvalue not drained".into());
            }
            return Ok(None);
        }
        self.remaining -= 1;

        let len = self.buf.read_i32::<BigEndian>()?;
        let val = if len < 0 {
            None
        } else {
            if self.buf.len() < len as usize {
                return Err("invalid value length".into());
            }

            let (val, buf) = self.buf.split_at(len as usize);
            self.buf = buf;
            Some(val)
        };

        Ok(Some(val))
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.remaining as usize;
        (len, Some(len))
    }
}

/// Serializes a Postgres ltree string
#[inline]
pub fn ltree_to_sql(v: &str, buf: &mut BytesMut) {
    // A version number is prepended to an ltree string per spec
    buf.put_u8(1);
    // Append the rest of the query
    buf.put_slice(v.as_bytes());
}

/// Deserialize a Postgres ltree string
#[inline]
pub fn ltree_from_sql(buf: &[u8]) -> Result<&str, StdBox<dyn Error + Sync + Send>> {
    match buf {
        // Remove the version number from the front of the ltree per spec
        [1u8, rest @ ..] => Ok(str::from_utf8(rest)?),
        _ => Err("ltree version 1 only supported".into()),
    }
}

/// Serializes a Postgres lquery string
#[inline]
pub fn lquery_to_sql(v: &str, buf: &mut BytesMut) {
    // A version number is prepended to an lquery string per spec
    buf.put_u8(1);
    // Append the rest of the query
    buf.put_slice(v.as_bytes());
}

/// Deserialize a Postgres lquery string
#[inline]
pub fn lquery_from_sql(buf: &[u8]) -> Result<&str, StdBox<dyn Error + Sync + Send>> {
    match buf {
        // Remove the version number from the front of the lquery per spec
        [1u8, rest @ ..] => Ok(str::from_utf8(rest)?),
        _ => Err("lquery version 1 only supported".into()),
    }
}

/// Serializes a Postgres ltxtquery string
#[inline]
pub fn ltxtquery_to_sql(v: &str, buf: &mut BytesMut) {
    // A version number is prepended to an ltxtquery string per spec
    buf.put_u8(1);
    // Append the rest of the query
    buf.put_slice(v.as_bytes());
}

/// Deserialize a Postgres ltxtquery string
#[inline]
pub fn ltxtquery_from_sql(buf: &[u8]) -> Result<&str, StdBox<dyn Error + Sync + Send>> {
    match buf {
        // Remove the version number from the front of the ltxtquery per spec
        [1u8, rest @ ..] => Ok(str::from_utf8(rest)?),
        _ => Err("ltxtquery version 1 only supported".into()),
    }
}
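
// Illustrative sketch (not part of the crate): the version-prefix framing shared by the
// three pairs of functions above; the unit tests that follow exercise the same round trip.
#[allow(dead_code)]
fn _ltree_roundtrip_sketch() -> Result<(), StdBox<dyn Error + Sync + Send>> {
    let mut buf = BytesMut::new();
    ltree_to_sql("Top.Science.Astronomy", &mut buf);
    assert_eq!(buf[0], 1); // leading version byte
    assert_eq!(ltree_from_sql(&buf)?, "Top.Science.Astronomy");
    Ok(())
}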
@@ -1,87 +0,0 @@
use bytes::{Buf, BytesMut};

use super::*;

#[test]
fn ltree_sql() {
    let mut query = vec![1u8];
    query.extend_from_slice("A.B.C".as_bytes());

    let mut buf = BytesMut::new();

    ltree_to_sql("A.B.C", &mut buf);

    assert_eq!(query.as_slice(), buf.chunk());
}

#[test]
fn ltree_str() {
    let mut query = vec![1u8];
    query.extend_from_slice("A.B.C".as_bytes());

    assert!(ltree_from_sql(query.as_slice()).is_ok())
}

#[test]
fn ltree_wrong_version() {
    let mut query = vec![2u8];
    query.extend_from_slice("A.B.C".as_bytes());

    assert!(ltree_from_sql(query.as_slice()).is_err())
}

#[test]
fn lquery_sql() {
    let mut query = vec![1u8];
    query.extend_from_slice("A.B.C".as_bytes());

    let mut buf = BytesMut::new();

    lquery_to_sql("A.B.C", &mut buf);

    assert_eq!(query.as_slice(), buf.chunk());
}

#[test]
fn lquery_str() {
    let mut query = vec![1u8];
    query.extend_from_slice("A.B.C".as_bytes());

    assert!(lquery_from_sql(query.as_slice()).is_ok())
}

#[test]
fn lquery_wrong_version() {
    let mut query = vec![2u8];
    query.extend_from_slice("A.B.C".as_bytes());

    assert!(lquery_from_sql(query.as_slice()).is_err())
}

#[test]
fn ltxtquery_sql() {
    let mut query = vec![1u8];
    query.extend_from_slice("a & b*".as_bytes());

    let mut buf = BytesMut::new();

    ltxtquery_to_sql("a & b*", &mut buf);

    assert_eq!(query.as_slice(), buf.chunk());
}

#[test]
fn ltxtquery_str() {
    let mut query = vec![1u8];
    query.extend_from_slice("a & b*".as_bytes());

    assert!(ltxtquery_from_sql(query.as_slice()).is_ok())
}

#[test]
fn ltxtquery_wrong_version() {
    let mut query = vec![2u8];
    query.extend_from_slice("a & b*".as_bytes());

    assert!(ltxtquery_from_sql(query.as_slice()).is_err())
}
@@ -1,10 +0,0 @@
[package]
name = "postgres-types2"
version = "0.1.0"
edition = "2018"
license = "MIT/Apache-2.0"

[dependencies]
bytes.workspace = true
fallible-iterator.workspace = true
postgres-protocol2 = { path = "../postgres-protocol2" }
@@ -1,477 +0,0 @@
//! Conversions to and from Postgres types.
//!
//! This crate is used by the `tokio-postgres` and `postgres` crates. You normally don't need to depend directly on it
//! unless you want to define your own `ToSql` or `FromSql` definitions.
#![doc(html_root_url = "https://docs.rs/postgres-types/0.2")]
#![warn(clippy::all, rust_2018_idioms, missing_docs)]

use fallible_iterator::FallibleIterator;
use postgres_protocol2::types;
use std::any::type_name;
use std::error::Error;
use std::fmt;
use std::sync::Arc;

use crate::type_gen::{Inner, Other};

#[doc(inline)]
pub use postgres_protocol2::Oid;

use bytes::BytesMut;

/// Generates a simple implementation of `ToSql::accepts` which accepts the
/// types passed to it.
macro_rules! accepts {
    ($($expected:ident),+) => (
        fn accepts(ty: &$crate::Type) -> bool {
            matches!(*ty, $($crate::Type::$expected)|+)
        }
    )
}
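
// Illustrative sketch (not part of the crate): `accepts!(VARCHAR, TEXT)` expands to
// roughly the following associated function.
//
//     fn accepts(ty: &crate::Type) -> bool {
//         matches!(*ty, crate::Type::VARCHAR | crate::Type::TEXT)
//     }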

/// Generates an implementation of `ToSql::to_sql_checked`.
///
/// All `ToSql` implementations should use this macro.
macro_rules! to_sql_checked {
    () => {
        fn to_sql_checked(
            &self,
            ty: &$crate::Type,
            out: &mut $crate::private::BytesMut,
        ) -> ::std::result::Result<
            $crate::IsNull,
            Box<dyn ::std::error::Error + ::std::marker::Sync + ::std::marker::Send>,
        > {
            $crate::__to_sql_checked(self, ty, out)
        }
    };
}

// WARNING: this function is not considered part of this crate's public API.
// It is subject to change at any time.
#[doc(hidden)]
pub fn __to_sql_checked<T>(
    v: &T,
    ty: &Type,
    out: &mut BytesMut,
) -> Result<IsNull, Box<dyn Error + Sync + Send>>
where
    T: ToSql,
{
    if !T::accepts(ty) {
        return Err(Box::new(WrongType::new::<T>(ty.clone())));
    }
    v.to_sql(ty, out)
}
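
// Illustrative sketch (not part of the crate): the generated `to_sql_checked` consults
// `accepts` before writing, so a mismatched type yields a `WrongType` error and leaves
// the output buffer untouched. Relies on the `u32: ToSql` impl generated further below
// by `simple_to!(u32, oid_to_sql, OID)`.
#[allow(dead_code)]
fn _checked_rejects_wrong_type() {
    let mut out = BytesMut::new();
    let res = 1u32.to_sql_checked(&Type::TEXT, &mut out);
    assert!(matches!(res, Err(ref e) if e.is::<WrongType>()));
    assert!(out.is_empty());
}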

// mod pg_lsn;
#[doc(hidden)]
pub mod private;
// mod special;
mod type_gen;

/// A Postgres type.
#[derive(PartialEq, Eq, Clone, Hash)]
pub struct Type(Inner);

impl fmt::Debug for Type {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.0, fmt)
    }
}

impl fmt::Display for Type {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.schema() {
            "public" | "pg_catalog" => {}
            schema => write!(fmt, "{}.", schema)?,
        }
        fmt.write_str(self.name())
    }
}

impl Type {
    /// Creates a new `Type`.
    pub fn new(name: String, oid: Oid, kind: Kind, schema: String) -> Type {
        Type(Inner::Other(Arc::new(Other {
            name,
            oid,
            kind,
            schema,
        })))
    }

    /// Returns the `Type` corresponding to the provided `Oid` if it
    /// corresponds to a built-in type.
    pub fn from_oid(oid: Oid) -> Option<Type> {
        Inner::from_oid(oid).map(Type)
    }

    /// Returns the OID of the `Type`.
    pub fn oid(&self) -> Oid {
        self.0.oid()
    }

    /// Returns the kind of this type.
    pub fn kind(&self) -> &Kind {
        self.0.kind()
    }

    /// Returns the schema of this type.
    pub fn schema(&self) -> &str {
        match self.0 {
            Inner::Other(ref u) => &u.schema,
            _ => "pg_catalog",
        }
    }

    /// Returns the name of this type.
    pub fn name(&self) -> &str {
        self.0.name()
    }
}
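
// Illustrative sketch (not part of the crate): built-in types omit the schema prefix in
// `Display`, while user-defined types created with `Type::new` carry theirs. The enum
// name, OID, and schema below are made up for the example.
#[allow(dead_code)]
fn _type_api_sketch() {
    // Built-in type: `Display` hides the `pg_catalog` schema.
    assert_eq!(Type::TEXT.to_string(), "text");

    // Hypothetical user-defined enum type.
    let mood = Type::new(
        "mood".to_string(),
        16_000,
        Kind::Enum(vec!["sad".into(), "ok".into(), "happy".into()]),
        "my_schema".to_string(),
    );
    assert_eq!(mood.schema(), "my_schema");
    assert_eq!(mood.to_string(), "my_schema.mood");
    assert!(matches!(mood.kind(), Kind::Enum(_)));
}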

/// Represents the kind of a Postgres type.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub enum Kind {
    /// A simple type like `VARCHAR` or `INTEGER`.
    Simple,
    /// An enumerated type along with its variants.
    Enum(Vec<String>),
    /// A pseudo-type.
    Pseudo,
    /// An array type along with the type of its elements.
    Array(Type),
    /// A range type along with the type of its elements.
    Range(Type),
    /// A multirange type along with the type of its elements.
    Multirange(Type),
    /// A domain type along with its underlying type.
    Domain(Type),
    /// A composite type along with information about its fields.
    Composite(Vec<Field>),
}

/// Information about a field of a composite type.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Field {
    name: String,
    type_: Type,
}

impl Field {
    /// Creates a new `Field`.
    pub fn new(name: String, type_: Type) -> Field {
        Field { name, type_ }
    }

    /// Returns the name of the field.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Returns the type of the field.
    pub fn type_(&self) -> &Type {
        &self.type_
    }
}

/// An error indicating that a `NULL` Postgres value was passed to a `FromSql`
/// implementation that does not support `NULL` values.
#[derive(Debug, Clone, Copy)]
pub struct WasNull;

impl fmt::Display for WasNull {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.write_str("a Postgres value was `NULL`")
    }
}

impl Error for WasNull {}

/// An error indicating that a conversion was attempted between incompatible
/// Rust and Postgres types.
#[derive(Debug)]
pub struct WrongType {
    postgres: Type,
    rust: &'static str,
}

impl fmt::Display for WrongType {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            fmt,
            "cannot convert between the Rust type `{}` and the Postgres type `{}`",
            self.rust, self.postgres,
        )
    }
}

impl Error for WrongType {}

impl WrongType {
    /// Creates a new `WrongType` error.
    pub fn new<T>(ty: Type) -> WrongType {
        WrongType {
            postgres: ty,
            rust: type_name::<T>(),
        }
    }
}

/// An error indicating that an `as_text` conversion was attempted on a binary
/// result.
#[derive(Debug)]
pub struct WrongFormat {}

impl Error for WrongFormat {}

impl fmt::Display for WrongFormat {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            fmt,
            "cannot read column as text while it is in binary format"
        )
    }
}

/// A trait for types that can be created from a Postgres value.
pub trait FromSql<'a>: Sized {
    /// Creates a new value of this type from a buffer of data of the specified
    /// Postgres `Type` in its binary format.
    ///
    /// The caller of this method is responsible for ensuring that this type
    /// is compatible with the Postgres `Type`.
    fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<Self, Box<dyn Error + Sync + Send>>;

    /// Creates a new value of this type from a `NULL` SQL value.
    ///
    /// The caller of this method is responsible for ensuring that this type
    /// is compatible with the Postgres `Type`.
    ///
    /// The default implementation returns `Err(Box::new(WasNull))`.
    #[allow(unused_variables)]
    fn from_sql_null(ty: &Type) -> Result<Self, Box<dyn Error + Sync + Send>> {
        Err(Box::new(WasNull))
    }

    /// A convenience function that delegates to `from_sql` and `from_sql_null` depending on the
    /// value of `raw`.
    fn from_sql_nullable(
        ty: &Type,
        raw: Option<&'a [u8]>,
    ) -> Result<Self, Box<dyn Error + Sync + Send>> {
        match raw {
            Some(raw) => Self::from_sql(ty, raw),
            None => Self::from_sql_null(ty),
        }
    }

    /// Determines if a value of this type can be created from the specified
    /// Postgres `Type`.
    fn accepts(ty: &Type) -> bool;
}
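
// Illustrative sketch (not part of the crate): a minimal `FromSql` impl for a hypothetical
// newtype that delegates to the `&str` impl further below and only accepts the `citext`
// extension type.
#[allow(dead_code)]
#[derive(Debug)]
struct CiText(String);

impl<'a> FromSql<'a> for CiText {
    fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<CiText, Box<dyn Error + Sync + Send>> {
        <&str as FromSql>::from_sql(ty, raw).map(|s| CiText(s.to_string()))
    }

    fn accepts(ty: &Type) -> bool {
        ty.name() == "citext"
    }
}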

/// A trait for types which can be created from a Postgres value without borrowing any data.
///
/// This is primarily useful for trait bounds on functions.
pub trait FromSqlOwned: for<'a> FromSql<'a> {}

impl<T> FromSqlOwned for T where T: for<'a> FromSql<'a> {}

impl<'a, T: FromSql<'a>> FromSql<'a> for Option<T> {
    fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<Option<T>, Box<dyn Error + Sync + Send>> {
        <T as FromSql>::from_sql(ty, raw).map(Some)
    }

    fn from_sql_null(_: &Type) -> Result<Option<T>, Box<dyn Error + Sync + Send>> {
        Ok(None)
    }

    fn accepts(ty: &Type) -> bool {
        <T as FromSql>::accepts(ty)
    }
}

impl<'a, T: FromSql<'a>> FromSql<'a> for Vec<T> {
    fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<Vec<T>, Box<dyn Error + Sync + Send>> {
        let member_type = match *ty.kind() {
            Kind::Array(ref member) => member,
            _ => panic!("expected array type"),
        };

        let array = types::array_from_sql(raw)?;
        if array.dimensions().count()? > 1 {
            return Err("array contains too many dimensions".into());
        }

        array
            .values()
            .map(|v| T::from_sql_nullable(member_type, v))
            .collect()
    }

    fn accepts(ty: &Type) -> bool {
        match *ty.kind() {
            Kind::Array(ref inner) => T::accepts(inner),
            _ => false,
        }
    }
}

impl<'a> FromSql<'a> for String {
    fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<String, Box<dyn Error + Sync + Send>> {
        <&str as FromSql>::from_sql(ty, raw).map(ToString::to_string)
    }

    fn accepts(ty: &Type) -> bool {
        <&str as FromSql>::accepts(ty)
    }
}

impl<'a> FromSql<'a> for &'a str {
    fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<&'a str, Box<dyn Error + Sync + Send>> {
        match *ty {
            ref ty if ty.name() == "ltree" => types::ltree_from_sql(raw),
            ref ty if ty.name() == "lquery" => types::lquery_from_sql(raw),
            ref ty if ty.name() == "ltxtquery" => types::ltxtquery_from_sql(raw),
            _ => types::text_from_sql(raw),
        }
    }

    fn accepts(ty: &Type) -> bool {
        match *ty {
            Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN => true,
            ref ty
                if (ty.name() == "citext"
                    || ty.name() == "ltree"
                    || ty.name() == "lquery"
                    || ty.name() == "ltxtquery") =>
            {
                true
            }
            _ => false,
        }
    }
}

macro_rules! simple_from {
    ($t:ty, $f:ident, $($expected:ident),+) => {
        impl<'a> FromSql<'a> for $t {
            fn from_sql(_: &Type, raw: &'a [u8]) -> Result<$t, Box<dyn Error + Sync + Send>> {
                types::$f(raw)
            }

            accepts!($($expected),+);
        }
    }
}

simple_from!(i8, char_from_sql, CHAR);
simple_from!(u32, oid_from_sql, OID);

/// An enum representing the nullability of a Postgres value.
pub enum IsNull {
    /// The value is NULL.
    Yes,
    /// The value is not NULL.
    No,
}

/// A trait for types that can be converted into Postgres values.
pub trait ToSql: fmt::Debug {
    /// Converts the value of `self` into the binary format of the specified
    /// Postgres `Type`, appending it to `out`.
    ///
    /// The caller of this method is responsible for ensuring that this type
    /// is compatible with the Postgres `Type`.
    ///
    /// The return value indicates if this value should be represented as
    /// `NULL`. If this is the case, implementations **must not** write
    /// anything to `out`.
    fn to_sql(&self, ty: &Type, out: &mut BytesMut) -> Result<IsNull, Box<dyn Error + Sync + Send>>
    where
        Self: Sized;

    /// Determines if a value of this type can be converted to the specified
    /// Postgres `Type`.
    fn accepts(ty: &Type) -> bool
    where
        Self: Sized;

    /// An adaptor method used internally by Rust-Postgres.
    ///
    /// *All* implementations of this method should be generated by the
    /// `to_sql_checked!()` macro.
    fn to_sql_checked(
        &self,
        ty: &Type,
        out: &mut BytesMut,
    ) -> Result<IsNull, Box<dyn Error + Sync + Send>>;

    /// Specify the encode format
    fn encode_format(&self, _ty: &Type) -> Format {
        Format::Binary
    }
}

/// Supported Postgres message format types
///
/// Using Text format in a message assumes a Postgres `SERVER_ENCODING` of `UTF8`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Format {
    /// Text format (UTF-8)
    Text,
    /// Compact, typed binary format
    Binary,
}

impl ToSql for &str {
    fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result<IsNull, Box<dyn Error + Sync + Send>> {
        match *ty {
            ref ty if ty.name() == "ltree" => types::ltree_to_sql(self, w),
            ref ty if ty.name() == "lquery" => types::lquery_to_sql(self, w),
            ref ty if ty.name() == "ltxtquery" => types::ltxtquery_to_sql(self, w),
            _ => types::text_to_sql(self, w),
        }
        Ok(IsNull::No)
    }

    fn accepts(ty: &Type) -> bool {
        match *ty {
            Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN => true,
            ref ty
                if (ty.name() == "citext"
                    || ty.name() == "ltree"
                    || ty.name() == "lquery"
                    || ty.name() == "ltxtquery") =>
            {
                true
            }
            _ => false,
        }
    }

    to_sql_checked!();
}

macro_rules! simple_to {
    ($t:ty, $f:ident, $($expected:ident),+) => {
        impl ToSql for $t {
            fn to_sql(&self,
                      _: &Type,
                      w: &mut BytesMut)
                      -> Result<IsNull, Box<dyn Error + Sync + Send>> {
                types::$f(*self, w);
                Ok(IsNull::No)
            }

            accepts!($($expected),+);

            to_sql_checked!();
        }
    }
}

simple_to!(u32, oid_to_sql, OID);
@@ -1,34 +0,0 @@
use crate::{FromSql, Type};
pub use bytes::BytesMut;
use std::error::Error;

pub fn read_be_i32(buf: &mut &[u8]) -> Result<i32, Box<dyn Error + Sync + Send>> {
    if buf.len() < 4 {
        return Err("invalid buffer size".into());
    }
    let mut bytes = [0; 4];
    bytes.copy_from_slice(&buf[..4]);
    *buf = &buf[4..];
    Ok(i32::from_be_bytes(bytes))
}

pub fn read_value<'a, T>(
    type_: &Type,
    buf: &mut &'a [u8],
) -> Result<T, Box<dyn Error + Sync + Send>>
where
    T: FromSql<'a>,
{
    let len = read_be_i32(buf)?;
    let value = if len < 0 {
        None
    } else {
        if len as usize > buf.len() {
            return Err("invalid buffer size".into());
        }
        let (head, tail) = buf.split_at(len as usize);
        *buf = tail;
        Some(head)
    };
    T::from_sql_nullable(type_, value)
}
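
// Illustrative sketch (not part of the crate): decoding one length-prefixed column with
// the helper above. The layout is what `read_value` expects: a big-endian i32 length
// (-1 for NULL) followed by that many bytes.
#[allow(dead_code)]
fn _read_value_sketch() -> Result<(), Box<dyn Error + Sync + Send>> {
    let mut buf: &[u8] = &[0, 0, 0, 5, b'h', b'e', b'l', b'l', b'o'];
    let s: String = read_value(&Type::TEXT, &mut buf)?;
    assert_eq!(s, "hello");
    assert!(buf.is_empty());
    Ok(())
}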