Mirror of https://github.com/neondatabase/neon.git (synced 2026-02-03 18:50:38 +00:00)

Compare commits: `conrad/pro` ... `release-pr` (538 commits)
The commit table spans 538 entries, from `03666a1f37` at the top of the comparison down to `df7a9d1407`; the author, date, and message columns were not captured in the mirror, so the individual rows are omitted here.
The first hunk's file header was not captured; from its contents (`rustdocflags`, `rustflags`, a cargo `[alias]` table) it is evidently the repository's Cargo configuration. It enables frame pointers for all builds:

```diff
@@ -3,6 +3,16 @@
 # by the RUSTDOCFLAGS env var in CI.
 rustdocflags = ["-Arustdoc::private_intra_doc_links"]
 
+# Enable frame pointers. This may have a minor performance overhead, but makes it easier and more
+# efficient to obtain stack traces (and thus CPU/heap profiles). It may also avoid seg faults that
+# we've seen with libunwind-based profiling. See also:
+#
+# * <https://www.brendangregg.com/blog/2024-03-17/the-return-of-the-frame-pointers.html>
+# * <https://github.com/rust-lang/rust/pull/122646>
+#
+# NB: the RUSTFLAGS envvar will replace this. Make sure to update e.g. Dockerfile as well.
+rustflags = ["-Cforce-frame-pointers=yes"]
+
 [alias]
 build_testing = ["build", "--features", "testing"]
 neon = ["run", "--bin", "neon_local"]
```
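The `NB` in that comment is the operational gotcha: Cargo does not merge the `RUSTFLAGS` environment variable with config-file `rustflags`; the environment variable replaces them entirely. A minimal sketch of what any CI override therefore has to do (the `-Dwarnings` flag here is a hypothetical extra, not part of this diff):

```yaml
# Sketch: a job that overrides RUSTFLAGS must repeat the frame-pointer flag,
# since the env var replaces (does not extend) the config-file rustflags.
env:
  RUSTFLAGS: "-Cforce-frame-pointers=yes -Dwarnings"  # -Dwarnings is illustrative only
```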
**.github/actionlint.yml** (vendored, 4 changes)

```diff
@@ -21,3 +21,7 @@ config-variables:
 - SLACK_UPCOMING_RELEASE_CHANNEL_ID
 - DEV_AWS_OIDC_ROLE_ARN
 - BENCHMARK_INGEST_TARGET_PROJECTID
+- PGREGRESS_PG16_PROJECT_ID
+- PGREGRESS_PG17_PROJECT_ID
+- SLACK_ON_CALL_QA_STAGING_STREAM
+- DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN
```
The next file's header was not captured; from its inputs and Allure upload steps it is apparently the allure-report-generate composite action. Its `aws_oicd_role_arn` input is renamed to kebab-case and made required, and the credentials step is simplified accordingly:

```diff
@@ -7,10 +7,9 @@ inputs:
     type: boolean
     required: false
     default: false
-  aws_oicd_role_arn:
-    description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
-    required: false
-    default: ''
+  aws-oicd-role-arn:
+    description: 'OIDC role arn to interract with S3'
+    required: true
 
 outputs:
   base-url:
@@ -84,12 +83,11 @@ runs:
       ALLURE_VERSION: 2.27.0
       ALLURE_ZIP_SHA256: b071858fb2fa542c65d8f152c5c40d26267b2dfb74df1f1608a589ecca38e777
 
-    - name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
-      if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
-      uses: aws-actions/configure-aws-credentials@v4
+    - uses: aws-actions/configure-aws-credentials@v4
+      if: ${{ !cancelled() }}
       with:
         aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws_oicd_role_arn }}
+        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
        role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
 
     # Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this
```
**.github/actions/allure-report-store/action.yml** (vendored, 14 changes)

```diff
@@ -8,10 +8,9 @@ inputs:
   unique-key:
     description: 'string to distinguish different results in the same run'
     required: true
-  aws_oicd_role_arn:
-    description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
-    required: false
-    default: ''
+  aws-oicd-role-arn:
+    description: 'OIDC role arn to interract with S3'
+    required: true
 
 runs:
   using: "composite"
@@ -36,12 +35,11 @@ runs:
       env:
         REPORT_DIR: ${{ inputs.report-dir }}
 
-    - name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
-      if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
-      uses: aws-actions/configure-aws-credentials@v4
+    - uses: aws-actions/configure-aws-credentials@v4
+      if: ${{ !cancelled() }}
       with:
         aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws_oicd_role_arn }}
+        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
         role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
 
     - name: Upload test results
```
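For callers, the practical effect of both renames is the same: the input key changes from `aws_oicd_role_arn` to `aws-oicd-role-arn` and can no longer be omitted. A minimal caller sketch (the `unique-key` value is illustrative; the role variable matches the one used elsewhere in this comparison):

```yaml
- name: Upload test results
  if: ${{ !cancelled() }}
  uses: ./.github/actions/allure-report-store
  with:
    report-dir: /tmp/test_output/allure/results
    unique-key: release-16  # illustrative value
    aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
```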
**.github/actions/download/action.yml** (vendored, 9 changes)

```diff
@@ -15,10 +15,19 @@ inputs:
   prefix:
     description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
     required: false
+  aws-oicd-role-arn:
+    description: 'OIDC role arn to interract with S3'
+    required: true
 
 runs:
   using: "composite"
   steps:
+    - uses: aws-actions/configure-aws-credentials@v4
+      with:
+        aws-region: eu-central-1
+        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+        role-duration-seconds: 3600
+
     - name: Download artifact
       id: download-artifact
       shell: bash -euxo pipefail {0}
```
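Since the new input is required, every consumer of the download action now authenticates before touching S3. A hedged usage sketch (the `uses:` path is inferred from the action's file location; the artifact name follows the convention seen later in this diff):

```yaml
- name: Download Neon artifact
  uses: ./.github/actions/download  # path inferred from .github/actions/download/action.yml
  with:
    name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
    path: /tmp/neon/
    prefix: latest
    aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
```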
**.github/actions/run-python-test-set/action.yml** (vendored, 20 changes)

```diff
@@ -48,10 +48,9 @@ inputs:
     description: 'benchmark durations JSON'
     required: false
     default: '{}'
-  aws_oicd_role_arn:
-    description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
-    required: false
-    default: ''
+  aws-oicd-role-arn:
+    description: 'OIDC role arn to interract with S3'
+    required: true
 
 runs:
   using: "composite"
@@ -62,6 +61,7 @@ runs:
       with:
         name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
         path: /tmp/neon
+        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
 
     - name: Download Neon binaries for the previous release
       if: inputs.build_type != 'remote'
@@ -70,6 +70,7 @@ runs:
         name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
         path: /tmp/neon-previous
         prefix: latest
+        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
 
     - name: Download compatibility snapshot
       if: inputs.build_type != 'remote'
@@ -81,6 +82,7 @@ runs:
         # The lack of compatibility snapshot (for example, for the new Postgres version)
         # shouldn't fail the whole job. Only relevant test should fail.
         skip-if-does-not-exist: true
+        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
 
     - name: Checkout
      if: inputs.needs_postgres_source == 'true'
@@ -218,17 +220,19 @@ runs:
         # The lack of compatibility snapshot shouldn't fail the job
         # (for example if we didn't run the test for non build-and-test workflow)
         skip-if-does-not-exist: true
+        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
 
-    - name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
-      if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
-      uses: aws-actions/configure-aws-credentials@v4
+    - uses: aws-actions/configure-aws-credentials@v4
+      if: ${{ !cancelled() }}
       with:
         aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws_oicd_role_arn }}
+        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
         role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
 
     - name: Upload test results
       if: ${{ !cancelled() }}
       uses: ./.github/actions/allure-report-store
       with:
         report-dir: /tmp/test_output/allure/results
         unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}
+        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
```
Another hunk whose file header was not captured, judging by the `coverage-data-artifact` name a coverage-handling composite action, threads the same input through its download and upload steps:

```diff
@@ -14,9 +14,11 @@ runs:
        name: coverage-data-artifact
        path: /tmp/coverage
        skip-if-does-not-exist: true # skip if there's no previous coverage to download
+       aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
 
     - name: Upload coverage data
       uses: ./.github/actions/upload
       with:
         name: coverage-data-artifact
         path: /tmp/coverage
+        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
```
**.github/actions/upload/action.yml** (vendored, 11 changes)

```diff
@@ -14,6 +14,10 @@ inputs:
   prefix:
     description: "S3 prefix. Default is '${GITHUB_SHA}/${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
     required: false
+  aws-oicd-role-arn:
+    description: "the OIDC role arn for aws auth"
+    required: false
+    default: ""
 
 runs:
   using: "composite"
@@ -53,6 +57,13 @@ runs:
 
         echo 'SKIPPED=false' >> $GITHUB_OUTPUT
 
+    - name: Configure AWS credentials
+      uses: aws-actions/configure-aws-credentials@v4
+      with:
+        aws-region: eu-central-1
+        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+        role-duration-seconds: 3600
+
     - name: Upload artifact
       if: ${{ steps.prepare-artifact.outputs.SKIPPED == 'false' }}
       shell: bash -euxo pipefail {0}
```
**.github/file-filters.yaml** (vendored, new file, 12 lines)

```diff
@@ -0,0 +1,12 @@
+rust_code: ['**/*.rs', '**/Cargo.toml', '**/Cargo.lock']
+
+v14: ['vendor/postgres-v14/**', 'Makefile', 'pgxn/**']
+v15: ['vendor/postgres-v15/**', 'Makefile', 'pgxn/**']
+v16: ['vendor/postgres-v16/**', 'Makefile', 'pgxn/**']
+v17: ['vendor/postgres-v17/**', 'Makefile', 'pgxn/**']
+
+rebuild_neon_extra:
+  - .github/workflows/neon_extra_builds.yml
+
+rebuild_macos:
+  - .github/workflows/build-macos.yml
```
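The shape of the new filter file matches the syntax of the dorny/paths-filter action, so a consuming job would presumably wire it up along these lines (the consumer step is an assumption; only the filter file itself appears in this diff):

```yaml
- uses: dorny/paths-filter@v3  # assumed consumer, not shown in this diff
  id: changes
  with:
    filters: .github/file-filters.yaml
# Each top-level key becomes a boolean output:
- if: steps.changes.outputs.v17 == 'true'
  run: echo "postgres v17 sources changed"
```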
The following hunk's file header was not captured; the `benchmark_restore_status` step suggests a benchmarking/restore workflow. It adds the role to an artifact download:

```diff
@@ -70,6 +70,7 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       # we create a table that has one row for each database that we want to restore with the status whether the restore is done
       - name: Create benchmark_restore_status table if it does not exist
```
**.github/workflows/_build-and-test-locally.yml** (vendored, 20 changes): static AWS keys give way to OIDC role assumption, and jobs gain the permissions that requires:

```diff
@@ -31,12 +31,13 @@ defaults:
 env:
   RUST_BACKTRACE: 1
   COPT: '-Werror'
-  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
 
 jobs:
   build-neon:
     runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      contents: read
     container:
       image: ${{ inputs.build-tools-image }}
       credentials:
@@ -205,6 +206,13 @@ jobs:
           done
           fi
 
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          role-duration-seconds: 18000 # 5 hours
+
       - name: Run rust tests
         env:
           NEXTEST_RETRIES: 3
@@ -256,6 +264,7 @@ jobs:
         with:
           name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-artifact
           path: /tmp/neon
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       # XXX: keep this after the binaries.list is formed, so the coverage can properly work later
       - name: Merge and upload coverage data
@@ -265,6 +274,10 @@ jobs:
   regress-tests:
     # Don't run regression tests on debug arm64 builds
     if: inputs.build-type != 'debug' || inputs.arch != 'arm64'
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      contents: read
+      statuses: write
     needs: [ build-neon ]
     runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
     container:
@@ -283,7 +296,7 @@ jobs:
           submodules: true
 
       - name: Pytest regression tests
-        continue-on-error: ${{ matrix.lfc_state == 'with-lfc' }}
+        continue-on-error: ${{ matrix.lfc_state == 'with-lfc' && inputs.build-type == 'debug' }}
         uses: ./.github/actions/run-python-test-set
         timeout-minutes: 60
         with:
@@ -295,6 +308,7 @@ jobs:
           real_s3_region: eu-central-1
           rerun_failed: true
           pg_version: ${{ matrix.pg_version }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
           CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
```
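The recurring pattern in these workflow changes is the standard GitHub-to-AWS OIDC exchange: static `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` secrets are dropped, the job is granted `id-token: write`, and `aws-actions/configure-aws-credentials` trades the job's OIDC token for short-lived credentials. Reduced to a skeleton (the role variable is the one used throughout this comparison; durations vary per job):

```yaml
permissions:
  id-token: write  # required so the job can request an OIDC token
  contents: read
steps:
  - uses: aws-actions/configure-aws-credentials@v4
    with:
      aws-region: eu-central-1
      role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      role-duration-seconds: 3600
```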
**.github/workflows/actionlint.yml** (vendored, 2 changes)

```diff
@@ -33,7 +33,7 @@ jobs:
           # SC2086 - Double quote to prevent globbing and word splitting. - https://www.shellcheck.net/wiki/SC2086
           SHELLCHECK_OPTS: --exclude=SC2046,SC2086
         with:
-          fail_on_error: true
+          fail_level: error
           filter_mode: nofilter
           level: error
```
**.github/workflows/benchmarking.yml** (vendored, 67 changes): besides the input rename at every call site, the workflow gains a `neonvm-captest-new-many-tables` platform and a step that pre-creates many relations:

```diff
@@ -105,6 +105,7 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Create Neon Project
         id: create-neon-project
@@ -122,7 +123,7 @@ jobs:
           run_in_parallel: false
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
           # Set --sparse-ordering option of pytest-order plugin
           # to ensure tests are running in order of appears in the file.
           # It's important for test_perf_pgbench.py::test_pgbench_remote_* tests
@@ -152,7 +153,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
@@ -204,6 +205,7 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Run Logical Replication benchmarks
         uses: ./.github/actions/run-python-test-set
@@ -214,7 +216,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 5400
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -231,7 +233,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 5400
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -243,7 +245,7 @@ jobs:
         uses: ./.github/actions/allure-report-generate
         with:
           store-test-results-into-db: true
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
@@ -306,6 +308,7 @@ jobs:
             "image": [ "'"$image_default"'" ],
             "include": [{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_default"', "image": "'"$image_default"'" },
                         { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
+                        { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new-many-tables","db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
                         { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
                         { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
                         { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-new", "db_size": "10gb","runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
@@ -405,9 +408,10 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Create Neon Project
-        if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
+        if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-new-many-tables", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
         id: create-neon-project
         uses: ./.github/actions/neon-project-create
         with:
@@ -426,7 +430,7 @@ jobs:
             neonvm-captest-sharding-reuse)
               CONNSTR=${{ secrets.BENCHMARK_CAPTEST_SHARDING_CONNSTR }}
               ;;
-            neonvm-captest-new | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
+            neonvm-captest-new | neonvm-captest-new-many-tables | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
               CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
               ;;
             rds-aurora)
@@ -443,6 +447,26 @@ jobs:
 
           echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
 
+      # we want to compare Neon project OLTP throughput and latency at scale factor 10 GB
+      # without (neonvm-captest-new)
+      # and with (neonvm-captest-new-many-tables) many relations in the database
+      - name: Create many relations before the run
+        if: contains(fromJson('["neonvm-captest-new-many-tables"]'), matrix.platform)
+        uses: ./.github/actions/run-python-test-set
+        with:
+          build_type: ${{ env.BUILD_TYPE }}
+          test_selection: performance
+          run_in_parallel: false
+          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
+          extra_params: -m remote_cluster --timeout 21600 -k test_perf_many_relations
+          pg_version: ${{ env.DEFAULT_PG_VERSION }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+        env:
+          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
+          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
+          TEST_NUM_RELATIONS: 10000
+
       - name: Benchmark init
         uses: ./.github/actions/run-python-test-set
         with:
@@ -452,7 +476,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -467,7 +491,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -482,7 +506,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -500,7 +524,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
@@ -611,7 +635,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -626,7 +650,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -637,7 +661,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
@@ -708,6 +732,7 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Set up Connection String
         id: set-up-connstr
@@ -739,7 +764,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 43200 -k test_clickbench
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -753,7 +778,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
@@ -818,6 +843,7 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Get Connstring Secret Name
         run: |
@@ -856,7 +882,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_tpch
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -868,7 +894,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
@@ -926,6 +952,7 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Set up Connection String
         id: set-up-connstr
@@ -957,7 +984,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -968,7 +995,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 
       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
```
241
.github/workflows/build-macos.yml
vendored
Normal file
241
.github/workflows/build-macos.yml
vendored
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
name: Check neon with MacOS builds
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
inputs:
|
||||||
|
pg_versions:
|
||||||
|
description: "Array of the pg versions to build for, for example: ['v14', 'v17']"
|
||||||
|
type: string
|
||||||
|
default: '[]'
|
||||||
|
required: false
|
||||||
|
rebuild_rust_code:
|
||||||
|
description: "Rebuild Rust code"
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
required: false
|
||||||
|
rebuild_everything:
|
||||||
|
description: "If true, rebuild for all versions"
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
required: false
|
||||||
|
|
||||||
|
env:
|
||||||
|
RUST_BACKTRACE: 1
|
||||||
|
COPT: '-Werror'
|
||||||
|
|
||||||
|
# TODO: move `check-*` and `files-changed` jobs to the "Caller" Workflow
|
||||||
|
# We should care about that as Github has limitations:
|
||||||
|
# - You can connect up to four levels of workflows
|
||||||
|
# - You can call a maximum of 20 unique reusable workflows from a single workflow file.
|
||||||
|
# https://docs.github.com/en/actions/sharing-automations/reusing-workflows#limitations
|
||||||
|
jobs:
|
||||||
|
build-pgxn:
|
||||||
|
if: |
|
||||||
|
(inputs.pg_versions != '[]' || inputs.rebuild_everything) && (
|
||||||
|
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
|
||||||
|
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
|
||||||
|
github.ref_name == 'main'
|
||||||
|
)
|
||||||
|
timeout-minutes: 30
|
||||||
|
runs-on: macos-15
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
postgres-version: ${{ inputs.rebuild_everything && fromJson('["v14", "v15", "v16", "v17"]') || fromJSON(inputs.pg_versions) }}
|
||||||
|
env:
|
||||||
|
# Use release build only, to have less debug info around
|
||||||
|
# Hence keeping target/ (and general cache size) smaller
|
||||||
|
BUILD_TYPE: release
|
||||||
|
steps:
|
||||||
|
- name: Checkout main repo
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set pg ${{ matrix.postgres-version }} for caching
|
||||||
|
id: pg_rev
|
||||||
|
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-${{ matrix.postgres-version }}) | tee -a "${GITHUB_OUTPUT}"
|
||||||
|
|
||||||
|
- name: Cache postgres ${{ matrix.postgres-version }} build
|
||||||
|
id: cache_pg
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: pg_install/${{ matrix.postgres-version }}
|
||||||
|
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ matrix.postgres-version }}-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
||||||
|
|
||||||
|
- name: Checkout submodule vendor/postgres-${{ matrix.postgres-version }}
|
||||||
|
if: steps.cache_pg.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
git submodule init vendor/postgres-${{ matrix.postgres-version }}
|
||||||
|
git submodule update --depth 1 --recursive
|
||||||
|
|
||||||
|
- name: Install build dependencies
|
||||||
|
if: steps.cache_pg.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
brew install flex bison openssl protobuf icu4c
|
||||||
|
|
||||||
|
- name: Set extra env for macOS
|
||||||
|
if: steps.cache_pg.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
|
||||||
|
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Build Postgres ${{ matrix.postgres-version }}
|
||||||
|
if: steps.cache_pg.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
make postgres-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu)
|
||||||
|
|
||||||
|
- name: Build Neon Pg Ext ${{ matrix.postgres-version }}
|
||||||
|
        if: steps.cache_pg.outputs.cache-hit != 'true'
        run: |
          make "neon-pg-ext-${{ matrix.postgres-version }}" -j$(sysctl -n hw.ncpu)

      - name: Get postgres headers ${{ matrix.postgres-version }}
        if: steps.cache_pg.outputs.cache-hit != 'true'
        run: |
          make postgres-headers-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu)

  build-walproposer-lib:
    if: |
      (inputs.pg_versions != '[]' || inputs.rebuild_everything) && (
        contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
        contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
        github.ref_name == 'main'
      )
    timeout-minutes: 30
    runs-on: macos-15
    needs: [build-pgxn]
    env:
      # Use release build only, to have less debug info around
      # Hence keeping target/ (and general cache size) smaller
      BUILD_TYPE: release
    steps:
      - name: Checkout main repo
        uses: actions/checkout@v4

      - name: Set pg v17 for caching
        id: pg_rev
        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) | tee -a "${GITHUB_OUTPUT}"

      - name: Cache postgres v17 build
        id: cache_pg
        uses: actions/cache@v4
        with:
          path: pg_install/v17
          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

      - name: Cache walproposer-lib
        id: cache_walproposer_lib
        uses: actions/cache@v4
        with:
          path: pg_install/build/walproposer-lib
          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

      - name: Checkout submodule vendor/postgres-v17
        if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
        run: |
          git submodule init vendor/postgres-v17
          git submodule update --depth 1 --recursive

      - name: Install build dependencies
        if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
        run: |
          brew install flex bison openssl protobuf icu4c

      - name: Set extra env for macOS
        if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
        run: |
          echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
          echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV

      - name: Build walproposer-lib (only for v17)
        if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
        run:
          make walproposer-lib -j$(sysctl -n hw.ncpu)

  cargo-build:
    if: |
      (inputs.pg_versions != '[]' || inputs.rebuild_rust_code || inputs.rebuild_everything) && (
        contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
        contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
        github.ref_name == 'main'
      )
    timeout-minutes: 30
    runs-on: macos-15
    needs: [build-pgxn, build-walproposer-lib]
    env:
      # Use release build only, to have less debug info around
      # Hence keeping target/ (and general cache size) smaller
      BUILD_TYPE: release
    steps:
      - name: Checkout main repo
        uses: actions/checkout@v4
        with:
          submodules: true

      - name: Set pg v14 for caching
        id: pg_rev_v14
        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) | tee -a "${GITHUB_OUTPUT}"

      - name: Set pg v15 for caching
        id: pg_rev_v15
        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) | tee -a "${GITHUB_OUTPUT}"

      - name: Set pg v16 for caching
        id: pg_rev_v16
        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v16) | tee -a "${GITHUB_OUTPUT}"

      - name: Set pg v17 for caching
        id: pg_rev_v17
        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) | tee -a "${GITHUB_OUTPUT}"

      - name: Cache postgres v14 build
        id: cache_pg
        uses: actions/cache@v4
        with:
          path: pg_install/v14
          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v14-${{ steps.pg_rev_v14.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

      - name: Cache postgres v15 build
        id: cache_pg_v15
        uses: actions/cache@v4
        with:
          path: pg_install/v15
          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v15-${{ steps.pg_rev_v15.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

      - name: Cache postgres v16 build
        id: cache_pg_v16
        uses: actions/cache@v4
        with:
          path: pg_install/v16
          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v16-${{ steps.pg_rev_v16.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

      - name: Cache postgres v17 build
        id: cache_pg_v17
        uses: actions/cache@v4
        with:
          path: pg_install/v17
          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v17-${{ steps.pg_rev_v17.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

      - name: Cache cargo deps (only for v17)
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            !~/.cargo/registry/src
            ~/.cargo/git
            target
          key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust

      - name: Cache walproposer-lib
        id: cache_walproposer_lib
        uses: actions/cache@v4
        with:
          path: pg_install/build/walproposer-lib
          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev_v17.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

      - name: Install build dependencies
        run: |
          brew install flex bison openssl protobuf icu4c

      - name: Set extra env for macOS
        run: |
          echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
          echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV

      - name: Run cargo build (only for v17)
        run: PQ_LIB_DIR=$(pwd)/pg_install/v17/lib cargo build --all --release -j$(sysctl -n hw.ncpu)

      - name: Check that no warnings are produced (only for v17)
        run: ./run_clippy.sh
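Note: every cache key above concatenates a schema version, the runner OS and arch, the build type, the pinned submodule revision, and a hash of the Makefile, so a toolchain or vendor bump invalidates the cache automatically. A rough local equivalent of what one key resolves to (illustrative only; `shasum` stands in for GitHub's `hashFiles()`, and the OS/arch strings are what a macos-15 runner would report):

    pg_rev=$(git rev-parse HEAD:vendor/postgres-v17)
    makefile_hash=$(shasum -a 256 Makefile | cut -d' ' -f1)
    echo "v1-macOS-ARM64-release-pg-v17-${pg_rev}-${makefile_hash}"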
268  .github/workflows/build_and_test.yml  vendored

@@ -21,8 +21,6 @@ concurrency:
 env:
   RUST_BACKTRACE: 1
   COPT: '-Werror'
-  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
   # A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
   E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}

@@ -214,7 +212,7 @@ jobs:
             fi
             echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
       - name: Run cargo clippy (debug)
-        run: cargo hack --feature-powerset clippy $CLIPPY_COMMON_ARGS
+        run: cargo hack --features default --ignore-unknown-features --feature-powerset clippy $CLIPPY_COMMON_ARGS

       - name: Check documentation generation
         run: cargo doc --workspace --no-deps --document-private-items

@@ -255,15 +253,15 @@ jobs:
       build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
       build-tag: ${{ needs.tag.outputs.build-tag }}
       build-type: ${{ matrix.build-type }}
-      # Run tests on all Postgres versions in release builds and only on the latest version in debug builds
-      # run without LFC on v17 release only
+      # Run tests on all Postgres versions in release builds and only on the latest version in debug builds.
+      # Run without LFC on v17 release and debug builds only. For all the other cases LFC is enabled.
       test-cfg: |
-        ${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "without-lfc"},
-                                                {"pg_version":"v15", "lfc_state": "without-lfc"},
-                                                {"pg_version":"v16", "lfc_state": "without-lfc"},
-                                                {"pg_version":"v17", "lfc_state": "without-lfc"},
-                                                {"pg_version":"v17", "lfc_state": "with-lfc"}]'
-        || '[{"pg_version":"v17", "lfc_state": "without-lfc"}]' }}
+        ${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "with-lfc"},
+                                                {"pg_version":"v15", "lfc_state": "with-lfc"},
+                                                {"pg_version":"v16", "lfc_state": "with-lfc"},
+                                                {"pg_version":"v17", "lfc_state": "with-lfc"},
+                                                {"pg_version":"v17", "lfc_state": "without-lfc"}]'
+        || '[{"pg_version":"v17", "lfc_state": "without-lfc" }]' }}
     secrets: inherit

   # Keep `benchmarks` job outside of `build-and-test-locally` workflow to make job failures non-blocking
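For clarity, the release-build matrix that `test-cfg` now expands to is the following (reformatted here for readability; the workflow passes it as a single JSON string):

    [
      {"pg_version": "v14", "lfc_state": "with-lfc"},
      {"pg_version": "v15", "lfc_state": "with-lfc"},
      {"pg_version": "v16", "lfc_state": "with-lfc"},
      {"pg_version": "v17", "lfc_state": "with-lfc"},
      {"pg_version": "v17", "lfc_state": "without-lfc"}
    ]

Debug builds keep the single `{"pg_version": "v17", "lfc_state": "without-lfc"}` entry, so the LFC default flips from off to on everywhere except the one v17 leg that deliberately tests without it.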
@@ -305,6 +303,11 @@ jobs:
   benchmarks:
     if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
     needs: [ check-permissions, build-and-test-locally, build-build-tools-image, get-benchmarks-durations ]
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: write
+      pull-requests: write
     runs-on: [ self-hosted, small ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm

@@ -333,6 +336,7 @@ jobs:
           extra_params: --splits 5 --group ${{ matrix.pytest_split_group }}
           benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
           pg_version: v16
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -345,6 +349,11 @@ jobs:
   report-benchmarks-failures:
     needs: [ benchmarks, create-test-report ]
     if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure'
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: write
+      pull-requests: write
     runs-on: ubuntu-22.04

     steps:

@@ -360,6 +369,11 @@ jobs:
   create-test-report:
     needs: [ check-permissions, build-and-test-locally, coverage-report, build-build-tools-image, benchmarks ]
     if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: write
+      pull-requests: write
     outputs:
       report-url: ${{ steps.create-allure-report.outputs.report-url }}

@@ -380,6 +394,7 @@ jobs:
         uses: ./.github/actions/allure-report-generate
         with:
           store-test-results-into-db: true
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

@@ -411,6 +426,10 @@ jobs:
   coverage-report:
     if: ${{ !startsWith(github.ref_name, 'release') }}
     needs: [ check-permissions, build-build-tools-image, build-and-test-locally ]
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: write
     runs-on: [ self-hosted, small ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm

@@ -437,12 +456,14 @@ jobs:
         with:
           name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact
           path: /tmp/neon
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Get coverage artifact
         uses: ./.github/actions/download
         with:
           name: coverage-data-artifact
           path: /tmp/coverage
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Merge coverage data
         run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge

@@ -517,7 +538,7 @@ jobs:

   trigger-e2e-tests:
     if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'run-e2e-tests-in-draft') || github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute' }}
-    needs: [ check-permissions, promote-images, tag ]
+    needs: [ check-permissions, promote-images-dev, tag ]
     uses: ./.github/workflows/trigger-e2e-tests.yml
     secrets: inherit

@@ -573,6 +594,10 @@ jobs:
   neon-image:
     needs: [ neon-image-arch, tag ]
     runs-on: ubuntu-22.04
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: read

     steps:
       - uses: docker/login-action@v3

@@ -587,11 +612,15 @@ jobs:
             neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-x64 \
             neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-arm64

-      - uses: docker/login-action@v3
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
         with:
-          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
-          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-          password: ${{ secrets.AWS_SECRET_KEY_DEV }}
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          role-duration-seconds: 3600
+
+      - name: Login to Amazon Dev ECR
+        uses: aws-actions/amazon-ecr-login@v2

       - name: Push multi-arch image to ECR
         run: |

@@ -600,6 +629,10 @@ jobs:

   compute-node-image-arch:
     needs: [ check-permissions, build-build-tools-image, tag ]
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: read
     strategy:
       fail-fast: false
       matrix:

@@ -640,11 +673,15 @@ jobs:
           username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
           password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

-      - uses: docker/login-action@v3
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
         with:
-          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
-          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-          password: ${{ secrets.AWS_SECRET_KEY_DEV }}
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          role-duration-seconds: 3600
+
+      - name: Login to Amazon Dev ECR
+        uses: aws-actions/amazon-ecr-login@v2

       - uses: docker/login-action@v3
         with:

@@ -717,6 +754,10 @@ jobs:

   compute-node-image:
     needs: [ compute-node-image-arch, tag ]
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: read
     runs-on: ubuntu-22.04

     strategy:

@@ -761,11 +802,15 @@ jobs:
             neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-x64 \
             neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64

-      - uses: docker/login-action@v3
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
         with:
-          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
-          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-          password: ${{ secrets.AWS_SECRET_KEY_DEV }}
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          role-duration-seconds: 3600
+
+      - name: Login to Amazon Dev ECR
+        uses: aws-actions/amazon-ecr-login@v2

       - name: Push multi-arch compute-node-${{ matrix.version.pg }} image to ECR
         run: |

@@ -795,7 +840,7 @@ jobs:
       - pg: v17
         debian: bookworm
     env:
-      VM_BUILDER_VERSION: v0.35.0
+      VM_BUILDER_VERSION: v0.37.1

     steps:
       - uses: actions/checkout@v4

@@ -885,12 +930,14 @@ jobs:
           docker compose -f ./docker-compose/docker-compose.yml logs || 0
           docker compose -f ./docker-compose/docker-compose.yml down

-  promote-images:
-    needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
+  promote-images-dev:
+    needs: [ check-permissions, tag, vm-compute-node-image ]
     runs-on: ubuntu-22.04

     permissions:
-      id-token: write # for `aws-actions/configure-aws-credentials`
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: read

     env:
       VERSIONS: v14 v15 v16 v17

@@ -901,12 +948,15 @@ jobs:
           username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
           password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

-      - name: Login to dev ECR
-        uses: docker/login-action@v3
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
         with:
-          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
-          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-          password: ${{ secrets.AWS_SECRET_KEY_DEV }}
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          role-duration-seconds: 3600
+
+      - name: Login to Amazon Dev ECR
+        uses: aws-actions/amazon-ecr-login@v2

       - name: Copy vm-compute-node images to ECR
         run: |

@@ -915,6 +965,35 @@ jobs:
             neondatabase/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }}
           done

+  promote-images-prod:
+    needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
+    runs-on: ubuntu-22.04
+    if: github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
+
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: read
+
+    env:
+      VERSIONS: v14 v15 v16 v17
+
+    steps:
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          role-duration-seconds: 3600
+
+      - name: Login to Amazon Dev ECR
+        uses: aws-actions/amazon-ecr-login@v2
+
+      - uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
+          password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
+
       - name: Add latest tag to images
         if: github.ref_name == 'main'
         run: |
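Note: the body of `Add latest tag to images` lies outside this hunk's context window. The promotion is a retag of manifests that the earlier jobs already pushed; a hedged sketch of what such a step typically contains (the image names come from this workflow, but the exact command is an assumption, not taken from the diff):

    # hypothetical retag loop; BUILD_TAG stands in for needs.tag.outputs.build-tag
    for repo in neon compute-tools; do
      docker buildx imagetools create \
        -t neondatabase/${repo}:latest \
        neondatabase/${repo}:${BUILD_TAG}
    done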
@@ -960,7 +1039,7 @@ jobs:

   push-to-acr-dev:
     if: github.ref_name == 'main'
-    needs: [ tag, promote-images ]
+    needs: [ tag, promote-images-dev ]
     uses: ./.github/workflows/_push-to-acr.yml
     with:
       client_id: ${{ vars.AZURE_DEV_CLIENT_ID }}

@@ -972,7 +1051,7 @@ jobs:

   push-to-acr-prod:
     if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-    needs: [ tag, promote-images ]
+    needs: [ tag, promote-images-prod ]
     uses: ./.github/workflows/_push-to-acr.yml
     with:
       client_id: ${{ vars.AZURE_PROD_CLIENT_ID }}

@@ -985,6 +1064,11 @@ jobs:
   trigger-custom-extensions-build-and-wait:
     needs: [ check-permissions, tag ]
     runs-on: ubuntu-22.04
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: write
+      pull-requests: write
     steps:
       - name: Set PR's status to pending and request a remote CI test
         run: |

@@ -1057,15 +1141,82 @@ jobs:
           exit 1

   deploy:
-    needs: [ check-permissions, promote-images, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
+    needs: [ check-permissions, promote-images-prod, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
     # `!failure() && !cancelled()` is required because the workflow depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
     if: (github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute') && !failure() && !cancelled()
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: write
     runs-on: [ self-hosted, small ]
     container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
     steps:
       - uses: actions/checkout@v4

+      - name: Create git tag and GitHub release
+        if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
+        uses: actions/github-script@v7
+        with:
+          retries: 5
+          script: |
+            const tag = "${{ needs.tag.outputs.build-tag }}";
+
+            try {
+              const existingRef = await github.rest.git.getRef({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                ref: `tags/${tag}`,
+              });
+
+              if (existingRef.data.object.sha !== context.sha) {
+                throw new Error(`Tag ${tag} already exists but points to a different commit (expected: ${context.sha}, actual: ${existingRef.data.object.sha}).`);
+              }
+
+              console.log(`Tag ${tag} already exists and points to ${context.sha} as expected.`);
+            } catch (error) {
+              if (error.status !== 404) {
+                throw error;
+              }
+
+              console.log(`Tag ${tag} does not exist. Creating it...`);
+              await github.rest.git.createRef({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                ref: `refs/tags/${tag}`,
+                sha: context.sha,
+              });
+              console.log(`Tag ${tag} created successfully.`);
+            }
+
+            // TODO: check how GitHub releases looks for proxy/compute releases and enable them if they're ok
+            if (context.ref !== 'refs/heads/release') {
+              console.log(`GitHub release skipped for ${context.ref}.`);
+              return;
+            }
+
+            try {
+              const existingRelease = await github.rest.repos.getReleaseByTag({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                tag: tag,
+              });
+
+              console.log(`Release for tag ${tag} already exists (ID: ${existingRelease.data.id}).`);
+            } catch (error) {
+              if (error.status !== 404) {
+                throw error;
+              }
+
+              console.log(`Release for tag ${tag} does not exist. Creating it...`);
+              await github.rest.repos.createRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                tag_name: tag,
+                generate_release_notes: true,
+              });
+              console.log(`Release for tag ${tag} created successfully.`);
+            }
+
       - name: Trigger deploy workflow
         env:
           GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
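The combined step above is idempotent: a re-run of `deploy` tolerates a tag or release that already exists and only fails when the tag points at a different commit. A rough command-line equivalent of the same guard (illustrative only, not part of the workflow; the tag value is a placeholder):

    # hypothetical re-run guard using git and the gh CLI
    tag="release-1234"   # stands in for needs.tag.outputs.build-tag
    existing=$(git ls-remote origin "refs/tags/${tag}" | cut -f1)
    if [ -z "${existing}" ]; then
      gh release create "${tag}" --generate-notes
    elif [ "${existing}" != "$(git rev-parse HEAD)" ]; then
      echo "tag ${tag} points at a different commit" >&2
      exit 1
    fi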
@@ -1115,38 +1266,13 @@ jobs:
             exit 1
           fi

-      - name: Create git tag
-        if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-        uses: actions/github-script@v7
-        with:
-          # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
-          retries: 5
-          script: |
-            await github.rest.git.createRef({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              ref: "refs/tags/${{ needs.tag.outputs.build-tag }}",
-              sha: context.sha,
-            })
-
-      # TODO: check how GitHub releases looks for proxy releases and enable it if it's ok
-      - name: Create GitHub release
-        if: github.ref_name == 'release'
-        uses: actions/github-script@v7
-        with:
-          # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
-          retries: 5
-          script: |
-            await github.rest.repos.createRelease({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              tag_name: "${{ needs.tag.outputs.build-tag }}",
-              generate_release_notes: true,
-            })
-
   # The job runs on `release` branch and copies compatibility data and Neon artifact from the last *release PR* to the latest directory
   promote-compatibility-data:
     needs: [ deploy ]
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: read
     # `!failure() && !cancelled()` is required because the workflow transitively depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
     if: github.ref_name == 'release' && !failure() && !cancelled()

@@ -1183,6 +1309,12 @@ jobs:
           echo "run-id=${run_id}" | tee -a ${GITHUB_OUTPUT}
           echo "commit-sha=${last_commit_sha}" | tee -a ${GITHUB_OUTPUT}

+      - uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          role-duration-seconds: 3600
+
       - name: Promote compatibility snapshot and Neon artifact
         env:
           BUCKET: neon-github-public-dev

@@ -1230,7 +1362,7 @@ jobs:
           done

   pin-build-tools-image:
-    needs: [ build-build-tools-image, promote-images, build-and-test-locally ]
+    needs: [ build-build-tools-image, promote-images-prod, build-and-test-locally ]
     if: github.ref_name == 'main'
    uses: ./.github/workflows/pin-build-tools-image.yml
    with:

@@ -1253,7 +1385,7 @@ jobs:
     - build-and-test-locally
     - check-codestyle-python
     - check-codestyle-rust
-    - promote-images
+    - promote-images-dev
     - test-images
     - trigger-custom-extensions-build-and-wait
     runs-on: ubuntu-22.04
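The recurring change across this file (and the workflows below) is one two-step swap: the static-key `docker/login-action` against the ECR registry is replaced by an OIDC role assumption followed by an ECR login, so no long-lived AWS secrets remain in the workflows. The pattern, exactly as introduced above, is:

    permissions:
      id-token: write # aws-actions/configure-aws-credentials

    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: eu-central-1
          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
          role-duration-seconds: 3600

      - name: Login to Amazon Dev ECR
        uses: aws-actions/amazon-ecr-login@v2

Each job that adopts it also needs the `id-token: write` permission, which is why `permissions:` blocks are added to so many jobs in this diff.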
46  .github/workflows/cloud-regress.yml  vendored

@@ -19,15 +19,21 @@ concurrency:
   group: ${{ github.workflow }}
   cancel-in-progress: true

+permissions:
+  id-token: write # aws-actions/configure-aws-credentials
+  statuses: write
+  contents: write
+
 jobs:
   regress:
     env:
       POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
-      DEFAULT_PG_VERSION: 16
       TEST_OUTPUT: /tmp/test_output
       BUILD_TYPE: remote
-      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
+    strategy:
+      fail-fast: false
+      matrix:
+        pg-version: [16, 17]

     runs-on: us-east-2
     container:

@@ -40,9 +46,11 @@ jobs:
           submodules: true

       - name: Patch the test
+        env:
+          PG_VERSION: ${{matrix.pg-version}}
         run: |
-          cd "vendor/postgres-v${DEFAULT_PG_VERSION}"
-          patch -p1 < "../../compute/patches/cloud_regress_pg${DEFAULT_PG_VERSION}.patch"
+          cd "vendor/postgres-v${PG_VERSION}"
+          patch -p1 < "../../compute/patches/cloud_regress_pg${PG_VERSION}.patch"

       - name: Generate a random password
         id: pwgen

@@ -55,8 +63,9 @@ jobs:
       - name: Change tests according to the generated password
         env:
           DBPASS: ${{ steps.pwgen.outputs.DBPASS }}
+          PG_VERSION: ${{matrix.pg-version}}
         run: |
-          cd vendor/postgres-v"${DEFAULT_PG_VERSION}"/src/test/regress
+          cd vendor/postgres-v"${PG_VERSION}"/src/test/regress
           for fname in sql/*.sql expected/*.out; do
             sed -i.bak s/NEON_PASSWORD_PLACEHOLDER/"'${DBPASS}'"/ "${fname}"
           done

@@ -72,27 +81,46 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+
+      - name: Create a new branch
+        id: create-branch
+        uses: ./.github/actions/neon-branch-create
+        with:
+          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
+          project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}

       - name: Run the regression tests
         uses: ./.github/actions/run-python-test-set
         with:
           build_type: ${{ env.BUILD_TYPE }}
           test_selection: cloud_regress
-          pg_version: ${{ env.DEFAULT_PG_VERSION }}
+          pg_version: ${{matrix.pg-version}}
           extra_params: -m remote_cluster
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
-          BENCHMARK_CONNSTR: ${{ secrets.PG_REGRESS_CONNSTR }}
+          BENCHMARK_CONNSTR: ${{steps.create-branch.outputs.dsn}}
+
+      - name: Delete branch
+        if: always()
+        uses: ./.github/actions/neon-branch-delete
+        with:
+          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
+          project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
+          branch_id: ${{steps.create-branch.outputs.branch_id}}

       - name: Create Allure report
         id: create-allure-report
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
+        with:
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
         uses: slackapi/slack-github-action@v1
         with:
-          channel-id: "C033QLM5P7D" # on-call-staging-stream
+          channel-id: ${{ vars.SLACK_ON_CALL_QA_STAGING_STREAM }}
           slack-message: |
             Periodic pg_regress on staging: ${{ job.status }}
             <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>
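Worth noting in the hunk above: the staging project ID is now resolved per matrix leg by indexing the repository variables object with GitHub's `format()` function. For `pg-version: 16` the expression

    ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}

evaluates to the value of the `PGREGRESS_PG16_PROJECT_ID` repository variable (and `PGREGRESS_PG17_PROJECT_ID` for the v17 leg), so each Postgres version gets its own project and throwaway branch without duplicating steps.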
15  .github/workflows/ingest_benchmark.yml  vendored

@@ -13,7 +13,7 @@ on:
     # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
     - cron: '0 9 * * *' # run once a day, timezone is utc
   workflow_dispatch: # adds ability to run this manually

 defaults:
   run:
     shell: bash -euxo pipefail {0}

@@ -28,7 +28,7 @@ jobs:
     strategy:
       fail-fast: false # allow other variants to continue even if one fails
       matrix:
         target_project: [new_empty_project, large_existing_project]
     permissions:
       contents: write
       statuses: write

@@ -56,7 +56,7 @@ jobs:
       with:
         aws-region: eu-central-1
         role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role

     - name: Download Neon artifact
       uses: ./.github/actions/download

@@ -64,6 +64,7 @@ jobs:
         name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
         path: /tmp/neon/
         prefix: latest
+        aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

     - name: Create Neon Project
       if: ${{ matrix.target_project == 'new_empty_project' }}

@@ -94,7 +95,7 @@ jobs:
         project_id: ${{ vars.BENCHMARK_INGEST_TARGET_PROJECTID }}
         api_key: ${{ secrets.NEON_STAGING_API_KEY }}

     - name: Initialize Neon project
       if: ${{ matrix.target_project == 'large_existing_project' }}
       env:
         BENCHMARK_INGEST_TARGET_CONNSTR: ${{ steps.create-neon-branch-ingest-target.outputs.dsn }}

@@ -122,7 +123,7 @@ jobs:
         ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"
         echo "BENCHMARK_INGEST_TARGET_CONNSTR=${BENCHMARK_INGEST_TARGET_CONNSTR}" >> $GITHUB_ENV

     - name: Invoke pgcopydb
       uses: ./.github/actions/run-python-test-set
       with:
         build_type: remote

@@ -131,7 +132,7 @@ jobs:
         extra_params: -s -m remote_cluster --timeout 86400 -k test_ingest_performance_using_pgcopydb
         pg_version: v16
         save_perf_report: true
-        aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+        aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
       env:
         BENCHMARK_INGEST_SOURCE_CONNSTR: ${{ secrets.BENCHMARK_INGEST_SOURCE_CONNSTR }}
         TARGET_PROJECT_TYPE: ${{ matrix.target_project }}

@@ -143,7 +144,7 @@ jobs:
       run: |
         export LD_LIBRARY_PATH=${PG_16_LIB_PATH}
         ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "\dt+"

     - name: Delete Neon Project
       if: ${{ always() && matrix.target_project == 'new_empty_project' }}
       uses: ./.github/actions/neon-project-delete
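The one-character-class fix above (`aws_oicd_role_arn` to `aws-oicd-role-arn`) matters because composite-action inputs are matched by exact name: a `with:` key that does not appear under `inputs:` in the action's `action.yml` is not passed through, leaving the role ARN unset. A hedged sketch of the expected input declaration (the actual `run-python-test-set/action.yml` is not shown in this diff, so the wording here is an assumption):

    # hypothetical excerpt of .github/actions/run-python-test-set/action.yml
    inputs:
      aws-oicd-role-arn:
        description: 'OIDC role ARN to assume for AWS access'
        required: false
        default: ''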
152  .github/workflows/neon_extra_builds.yml  vendored

@@ -31,19 +31,15 @@ jobs:
     uses: ./.github/workflows/build-build-tools-image.yml
     secrets: inherit

-  check-macos-build:
-    needs: [ check-permissions ]
-    if: |
-      contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
-      contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
-      github.ref_name == 'main'
-    timeout-minutes: 90
-    runs-on: macos-15
-
-    env:
-      # Use release build only, to have less debug info around
-      # Hence keeping target/ (and general cache size) smaller
-      BUILD_TYPE: release
+  files-changed:
+    name: Detect what files changed
+    runs-on: ubuntu-22.04
+    timeout-minutes: 3
+    outputs:
+      v17: ${{ steps.files_changed.outputs.v17 }}
+      postgres_changes: ${{ steps.postgres_changes.outputs.changes }}
+      rebuild_rust_code: ${{ steps.files_changed.outputs.rust_code }}
+      rebuild_everything: ${{ steps.files_changed.outputs.rebuild_neon_extra || steps.files_changed.outputs.rebuild_macos }}

     steps:
       - name: Checkout

@@ -51,102 +47,45 @@ jobs:
         with:
           submodules: true

-      - name: Install macOS postgres dependencies
-        run: brew install flex bison openssl protobuf icu4c
-
-      - name: Set pg 14 revision for caching
-        id: pg_v14_rev
-        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
-
-      - name: Set pg 15 revision for caching
-        id: pg_v15_rev
-        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
-
-      - name: Set pg 16 revision for caching
-        id: pg_v16_rev
-        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v16) >> $GITHUB_OUTPUT
-
-      - name: Set pg 17 revision for caching
-        id: pg_v17_rev
-        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) >> $GITHUB_OUTPUT
-
-      - name: Cache postgres v14 build
-        id: cache_pg_14
-        uses: actions/cache@v4
-        with:
-          path: pg_install/v14
-          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
-
-      - name: Cache postgres v15 build
-        id: cache_pg_15
-        uses: actions/cache@v4
-        with:
-          path: pg_install/v15
-          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
-
-      - name: Cache postgres v16 build
-        id: cache_pg_16
-        uses: actions/cache@v4
-        with:
-          path: pg_install/v16
-          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
-
-      - name: Cache postgres v17 build
-        id: cache_pg_17
-        uses: actions/cache@v4
-        with:
-          path: pg_install/v17
-          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v17_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
-
-      - name: Set extra env for macOS
-        run: |
-          echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
-          echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
-
-      - name: Cache cargo deps
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/.cargo/registry
-            !~/.cargo/registry/src
-            ~/.cargo/git
-            target
-          key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
-
-      - name: Build postgres v14
-        if: steps.cache_pg_14.outputs.cache-hit != 'true'
-        run: make postgres-v14 -j$(sysctl -n hw.ncpu)
-
-      - name: Build postgres v15
-        if: steps.cache_pg_15.outputs.cache-hit != 'true'
-        run: make postgres-v15 -j$(sysctl -n hw.ncpu)
-
-      - name: Build postgres v16
-        if: steps.cache_pg_16.outputs.cache-hit != 'true'
-        run: make postgres-v16 -j$(sysctl -n hw.ncpu)
-
-      - name: Build postgres v17
-        if: steps.cache_pg_17.outputs.cache-hit != 'true'
-        run: make postgres-v17 -j$(sysctl -n hw.ncpu)
-
-      - name: Build neon extensions
-        run: make neon-pg-ext -j$(sysctl -n hw.ncpu)
-
-      - name: Build walproposer-lib
-        run: make walproposer-lib -j$(sysctl -n hw.ncpu)
-
-      - name: Run cargo build
-        run: PQ_LIB_DIR=$(pwd)/pg_install/v16/lib cargo build --all --release
-
-      - name: Check that no warnings are produced
-        run: ./run_clippy.sh
-
-  gather-rust-build-stats:
-    needs: [ check-permissions, build-build-tools-image ]
-    if: |
-      contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
-      contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
-      github.ref_name == 'main'
+      - name: Check for Postgres changes
+        uses: dorny/paths-filter@1441771bbfdd59dcd748680ee64ebd8faab1a242 #v3
+        id: files_changed
+        with:
+          token: ${{ github.token }}
+          filters: .github/file-filters.yaml
+          base: ${{ github.event_name != 'pull_request' && (github.event.merge_group.base_ref || github.ref_name) || '' }}
+          ref: ${{ github.event_name != 'pull_request' && (github.event.merge_group.head_ref || github.ref) || '' }}
+
+      - name: Filter out only v-string for build matrix
+        id: postgres_changes
+        run: |
+          v_strings_only_as_json_array=$(echo ${{ steps.files_changed.outputs.changes }} | jq '.[]|select(test("v\\d+"))' | jq --slurp -c)
+          echo "changes=${v_strings_only_as_json_array}" | tee -a "${GITHUB_OUTPUT}"
+
+  check-macos-build:
+    needs: [ check-permissions, files-changed ]
+    if: |
+      contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
+      contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
+      github.ref_name == 'main'
+    uses: ./.github/workflows/build-macos.yml
+    with:
+      pg_versions: ${{ needs.files-changed.outputs.postgres_changes }}
+      rebuild_rust_code: ${{ needs.files-changed.outputs.rebuild_rust_code }}
+      rebuild_everything: ${{ fromJson(needs.files-changed.outputs.rebuild_everything) }}
+
+  gather-rust-build-stats:
+    needs: [ check-permissions, build-build-tools-image, files-changed ]
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: write
+    if: |
+      (needs.files-changed.outputs.v17 == 'true' || needs.files-changed.outputs.rebuild_everything == 'true') && (
+      contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
+      contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
+      github.ref_name == 'main'
+      )
     runs-on: [ self-hosted, large ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm

@@ -177,13 +116,18 @@ jobs:
       - name: Produce the build stats
         run: PQ_LIB_DIR=$(pwd)/pg_install/v17/lib cargo build --all --release --timings -j$(nproc)

+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          role-duration-seconds: 3600
+
       - name: Upload the build stats
         id: upload-stats
         env:
           BUCKET: neon-github-public-dev
           SHA: ${{ github.event.pull_request.head.sha || github.sha }}
-          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
         run: |
           REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
           aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"
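The new `files-changed` job keys everything off `.github/file-filters.yaml`, whose contents this diff does not show. For `dorny/paths-filter`, that file maps filter names to path globs, and the action's `changes` output is a JSON array of the names that matched; the `jq` step above then keeps only the `v14`/`v15`/`v16`/`v17`-style names to feed the build matrix. A hypothetical sketch of what such a filter file could look like (names inferred from the outputs referenced above; the real globs may differ):

    # hypothetical .github/file-filters.yaml
    v17: ['vendor/postgres-v17/**', 'pgxn/**']
    rust_code: ['**/*.rs', 'Cargo.lock', 'Cargo.toml']
    rebuild_macos: ['.github/workflows/build-macos.yml']
    rebuild_neon_extra: ['.github/workflows/neon_extra_builds.yml', 'Makefile']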
27  .github/workflows/periodic_pagebench.yml  vendored

@@ -27,6 +27,11 @@ concurrency:

 jobs:
   trigger_bench_on_ec2_machine_in_eu_central_1:
+    permissions:
+      id-token: write # aws-actions/configure-aws-credentials
+      statuses: write
+      contents: write
+      pull-requests: write
     runs-on: [ self-hosted, small ]
     container:
       image: neondatabase/build-tools:pinned-bookworm

@@ -38,8 +43,6 @@ jobs:
     env:
       API_KEY: ${{ secrets.PERIODIC_PAGEBENCH_EC2_RUNNER_API_KEY }}
       RUN_ID: ${{ github.run_id }}
-      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_ID }}
-      AWS_SECRET_ACCESS_KEY : ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_SECRET }}
       AWS_DEFAULT_REGION : "eu-central-1"
       AWS_INSTANCE_ID : "i-02a59a3bf86bc7e74"
     steps:

@@ -50,6 +53,13 @@ jobs:
       - name: Show my own (github runner) external IP address - useful for IP allowlisting
        run: curl https://ifconfig.me

+      - name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
+          role-duration-seconds: 3600
+
       - name: Start EC2 instance and wait for the instance to boot up
         run: |
           aws ec2 start-instances --instance-ids $AWS_INSTANCE_ID

@@ -124,11 +134,10 @@ jobs:
           cat "test_log_${GITHUB_RUN_ID}"

       - name: Create Allure report
-        env:
-          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
+        with:
+          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}

@@ -148,6 +157,14 @@ jobs:
           -H "Authorization: Bearer $API_KEY" \
           -d ''

+      - name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
+        if: always() && steps.poll_step.outputs.too_many_runs != 'true'
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
+          role-duration-seconds: 3600
+
       - name: Stop EC2 instance and wait for the instance to be stopped
         if: always() && steps.poll_step.outputs.too_many_runs != 'true'
         run: |
12  .github/workflows/pg-clients.yml  vendored

@@ -25,11 +25,13 @@ defaults:
   run:
     shell: bash -euxo pipefail {0}

+permissions:
+  id-token: write # aws-actions/configure-aws-credentials
+  statuses: write # require for posting a status update
+
 env:
   DEFAULT_PG_VERSION: 16
   PLATFORM: neon-captest-new
-  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
   AWS_DEFAULT_REGION: eu-central-1

 jobs:

@@ -94,6 +96,7 @@ jobs:
         name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
         path: /tmp/neon/
         prefix: latest
+        aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

     - name: Create Neon Project
       id: create-neon-project

@@ -110,6 +113,7 @@ jobs:
         run_in_parallel: false
         extra_params: -m remote_cluster
         pg_version: ${{ env.DEFAULT_PG_VERSION }}
+        aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
       env:
         BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}

@@ -126,6 +130,7 @@ jobs:
       uses: ./.github/actions/allure-report-generate
       with:
         store-test-results-into-db: true
+        aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
       env:
         REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

@@ -159,6 +164,7 @@ jobs:
         name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
         path: /tmp/neon/
         prefix: latest
+        aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

     - name: Create Neon Project
       id: create-neon-project

@@ -175,6 +181,7 @@ jobs:
         run_in_parallel: false
         extra_params: -m remote_cluster
         pg_version: ${{ env.DEFAULT_PG_VERSION }}
+        aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
       env:
         BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}

@@ -191,6 +198,7 @@ jobs:
       uses: ./.github/actions/allure-report-generate
       with:
         store-test-results-into-db: true
+        aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
       env:
         REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
.github/workflows/pin-build-tools-image.yml (14 changes, vendored)

@@ -67,7 +67,7 @@ jobs:
     runs-on: ubuntu-22.04
 
     permissions:
-      id-token: write # for `azure/login`
+      id-token: write # for `azure/login` and aws auth
 
     steps:
       - uses: docker/login-action@v3
@@ -75,11 +75,15 @@ jobs:
           username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
           password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
 
-      - uses: docker/login-action@v3
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
         with:
-          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
-          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-          password: ${{ secrets.AWS_SECRET_KEY_DEV }}
+          aws-region: eu-central-1
+          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          role-duration-seconds: 3600
+
+      - name: Login to Amazon Dev ECR
+        uses: aws-actions/amazon-ecr-login@v2
 
       - name: Azure login
         uses: azure/login@6c251865b4e6290e7b78be643ea2d005bc51f69a # @v2.1.1
.github/workflows/pre-merge-checks.yml (1 change, vendored)

@@ -63,6 +63,7 @@ jobs:
     if: always()
     permissions:
       statuses: write # for `github.repos.createCommitStatus(...)`
+      contents: write
     needs:
       - get-changed-files
       - check-codestyle-python
.github/workflows/release.yml (4 changes, vendored)

@@ -3,7 +3,7 @@ name: Create Release Branch
 on:
   schedule:
     # It should be kept in sync with if-condition in jobs
-    - cron: '0 6 * * MON' # Storage release
+    - cron: '0 6 * * FRI' # Storage release
    - cron: '0 6 * * THU' # Proxy release
   workflow_dispatch:
     inputs:
@@ -29,7 +29,7 @@ defaults:
 
 jobs:
   create-storage-release-branch:
-    if: ${{ github.event.schedule == '0 6 * * MON' || inputs.create-storage-release-branch }}
+    if: ${{ github.event.schedule == '0 6 * * FRI' || inputs.create-storage-release-branch }}
 
     permissions:
       contents: write
.github/workflows/trigger-e2e-tests.yml (8 changes, vendored)

@@ -68,7 +68,7 @@ jobs:
       GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
       TAG: ${{ needs.tag.outputs.build-tag }}
     steps:
-      - name: Wait for `promote-images` job to finish
+      - name: Wait for `promote-images-dev` job to finish
         # It's important to have a timeout here, the script in the step can run infinitely
         timeout-minutes: 60
         run: |
@@ -79,17 +79,17 @@ jobs:
           # For PRs we use the run id as the tag
           BUILD_AND_TEST_RUN_ID=${TAG}
           while true; do
-            conclusion=$(gh run --repo ${GITHUB_REPOSITORY} view ${BUILD_AND_TEST_RUN_ID} --json jobs --jq '.jobs[] | select(.name == "promote-images") | .conclusion')
+            conclusion=$(gh run --repo ${GITHUB_REPOSITORY} view ${BUILD_AND_TEST_RUN_ID} --json jobs --jq '.jobs[] | select(.name == "promote-images-dev") | .conclusion')
             case "$conclusion" in
               success)
                 break
                 ;;
               failure | cancelled | skipped)
-                echo "The 'promote-images' job didn't succeed: '${conclusion}'. Exiting..."
+                echo "The 'promote-images-dev' job didn't succeed: '${conclusion}'. Exiting..."
                 exit 1
                 ;;
               *)
-                echo "The 'promote-images' hasn't succeed yet. Waiting..."
+                echo "The 'promote-images-dev' job hasn't succeeded yet. Waiting..."
                 sleep 60
                 ;;
             esac
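The polling loop above is self-contained enough to reuse outside CI. A minimal sketch, assuming `gh` is authenticated; the repo and run id are placeholders:

    REPO="neondatabase/neon"      # placeholder
    RUN_ID="1234567890"           # placeholder run id
    JOB_NAME="promote-images-dev"
    while true; do
      conclusion=$(gh run view "${RUN_ID}" --repo "${REPO}" --json jobs \
        --jq ".jobs[] | select(.name == \"${JOB_NAME}\") | .conclusion")
      case "$conclusion" in
        success) break ;;
        failure | cancelled | skipped) echo "job ended: ${conclusion}"; exit 1 ;;
        *) echo "waiting for ${JOB_NAME}..."; sleep 60 ;;
      esac
    done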
CODEOWNERS (33 changes)

@@ -1,16 +1,29 @@
-/.github/ @neondatabase/developer-productivity
-/compute_tools/ @neondatabase/control-plane @neondatabase/compute
-/libs/pageserver_api/ @neondatabase/storage
-/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
-/libs/proxy/ @neondatabase/proxy
-/libs/remote_storage/ @neondatabase/storage
-/libs/safekeeper_api/ @neondatabase/storage
+# Autoscaling
 /libs/vm_monitor/ @neondatabase/autoscaling
-/pageserver/ @neondatabase/storage
+
+# DevProd
+/.github/ @neondatabase/developer-productivity
+
+# Compute
 /pgxn/ @neondatabase/compute
-/pgxn/neon/ @neondatabase/compute @neondatabase/storage
+/vendor/ @neondatabase/compute
+/compute/ @neondatabase/compute
+/compute_tools/ @neondatabase/compute
+
+# Proxy
+/libs/proxy/ @neondatabase/proxy
 /proxy/ @neondatabase/proxy
+
+# Storage
+/pageserver/ @neondatabase/storage
 /safekeeper/ @neondatabase/storage
 /storage_controller @neondatabase/storage
 /storage_scrubber @neondatabase/storage
-/vendor/ @neondatabase/compute
+/libs/pageserver_api/ @neondatabase/storage
+/libs/remote_storage/ @neondatabase/storage
+/libs/safekeeper_api/ @neondatabase/storage
+
+# Shared
+/pgxn/neon/ @neondatabase/compute @neondatabase/storage
+/libs/compute_api/ @neondatabase/compute @neondatabase/control-plane
+/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
Cargo.lock (648 changes, generated)
File diff suppressed because it is too large.
Cargo.toml (17 changes)

@@ -11,6 +11,7 @@ members = [
     "pageserver/pagebench",
     "proxy",
     "safekeeper",
+    "safekeeper/client",
     "storage_broker",
     "storage_controller",
     "storage_controller/client",
@@ -51,10 +52,7 @@ anyhow = { version = "1.0", features = ["backtrace"] }
 arc-swap = "1.6"
 async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] }
 atomic-take = "1.1.0"
-azure_core = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
-azure_identity = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
-azure_storage = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
-azure_storage_blobs = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
+backtrace = "0.3.74"
 flate2 = "1.0.26"
 async-stream = "0.3"
 async-trait = "0.1"
@@ -137,7 +135,7 @@ parquet = { version = "53", default-features = false, features = ["zstd"] }
 parquet_derive = "53"
 pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
 pin-project-lite = "0.2"
-pprof = { version = "0.14", features = ["criterion", "flamegraph", "protobuf", "protobuf-codec"] }
+pprof = { version = "0.14", features = ["criterion", "flamegraph", "frame-pointer", "protobuf", "protobuf-codec"] }
 procfs = "0.16"
 prometheus = {version = "0.13", default-features=false, features = ["process"]} # removes protobuf dependency
 prost = "0.13"
@@ -216,6 +214,12 @@ postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
 postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
 tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
 
+## Azure SDK crates
+azure_core = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
+azure_identity = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
+azure_storage = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
+azure_storage_blobs = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
+
 ## Local libraries
 compute_api = { version = "0.1", path = "./libs/compute_api/" }
 consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
@@ -231,6 +235,7 @@ postgres_initdb = { path = "./libs/postgres_initdb" }
 pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
 remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
 safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
+safekeeper_client = { path = "./safekeeper/client" }
 desim = { version = "0.1", path = "./libs/desim" }
 storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
 storage_controller_client = { path = "./storage_controller/client" }
@@ -261,6 +266,8 @@ tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
 [profile.release]
 # This is useful for profiling and, to some extent, debug.
 # Besides, debug info should not affect the performance.
+#
+# NB: we also enable frame pointers for improved profiling, see .cargo/config.toml.
 debug = true
 
 # disable debug symbols for all packages except this one to decrease binaries size
@@ -45,7 +45,7 @@ COPY --chown=nonroot . .
 
 ARG ADDITIONAL_RUSTFLAGS
 RUN set -e \
-    && PQ_LIB_DIR=$(pwd)/pg_install/v${STABLE_PG_VERSION}/lib RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment ${ADDITIONAL_RUSTFLAGS}" cargo build \
+    && PQ_LIB_DIR=$(pwd)/pg_install/v${STABLE_PG_VERSION}/lib RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment -Cforce-frame-pointers=yes ${ADDITIONAL_RUSTFLAGS}" cargo build \
       --bin pg_sni_router \
       --bin pageserver \
       --bin pagectl \
@@ -69,6 +69,8 @@ RUN set -e \
       libreadline-dev \
       libseccomp-dev \
       ca-certificates \
+      # System postgres for use with client libraries (e.g. in storage controller)
+      postgresql-15 \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
    && useradd -d /data neon \
    && chown -R neon:neon /data
@@ -115,7 +115,7 @@ RUN set -e \
 
 # Keep the version the same as in compute/compute-node.Dockerfile and
 # test_runner/regress/test_compute_metrics.py.
-ENV SQL_EXPORTER_VERSION=0.13.1
+ENV SQL_EXPORTER_VERSION=0.16.0
 RUN curl -fsSL \
     "https://github.com/burningalchemist/sql_exporter/releases/download/${SQL_EXPORTER_VERSION}/sql_exporter-${SQL_EXPORTER_VERSION}.linux-$(case "$(uname -m)" in x86_64) echo amd64;; aarch64) echo arm64;; esac).tar.gz" \
     --output sql_exporter.tar.gz \
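The `-Cforce-frame-pointers=yes` flag added above pairs with the `frame-pointer` feature enabled for `pprof` in Cargo.toml, so sampled stacks can be unwound from frame pointers. A sketch of an equivalent local build, assuming the repo root and a v16 Postgres install path; this is illustrative, not the exact CI invocation:

    export RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Cforce-frame-pointers=yes"
    PQ_LIB_DIR="$(pwd)/pg_install/v16/lib" cargo build --release --bin pageserver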
@@ -35,10 +35,12 @@ RUN case $DEBIAN_VERSION in \
     ;; \
     esac && \
     apt update && \
-    apt install --no-install-recommends -y git autoconf automake libtool build-essential bison flex libreadline-dev \
+    apt install --no-install-recommends --no-install-suggests -y \
+    ninja-build git autoconf automake libtool build-essential bison flex libreadline-dev \
     zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget ca-certificates pkg-config libssl-dev \
     libicu-dev libxslt1-dev liblz4-dev libzstd-dev zstd \
-    $VERSION_INSTALLS
+    $VERSION_INSTALLS \
+    && apt clean && rm -rf /var/lib/apt/lists/*
 
 #########################################################################################
 #
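The hunks that follow repeat the same apt hygiene everywhere: skip recommended and suggested packages, then clean the package lists inside the same RUN layer so they never persist in the image. The pattern in isolation (the package name is a placeholder):

    apt update \
        && apt install --no-install-recommends --no-install-suggests -y some-package \
        && apt clean \
        && rm -rf /var/lib/apt/lists/*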
@@ -113,10 +115,12 @@ ARG DEBIAN_VERSION
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 RUN apt update && \
-    apt install --no-install-recommends -y gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
+    apt install --no-install-recommends --no-install-suggests -y \
+    gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
     libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
     libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
-    protobuf-c-compiler xsltproc
+    protobuf-c-compiler xsltproc \
+    && apt clean && rm -rf /var/lib/apt/lists/*
 
 
 # Postgis 3.5.0 requires SFCGAL 1.4+
@@ -143,9 +147,9 @@ RUN case "${DEBIAN_VERSION}" in \
     wget https://gitlab.com/sfcgal/SFCGAL/-/archive/v${SFCGAL_VERSION}/SFCGAL-v${SFCGAL_VERSION}.tar.gz -O SFCGAL.tar.gz && \
     echo "${SFCGAL_CHECKSUM} SFCGAL.tar.gz" | sha256sum --check && \
     mkdir sfcgal-src && cd sfcgal-src && tar xzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
-    cmake -DCMAKE_BUILD_TYPE=Release . && make -j $(getconf _NPROCESSORS_ONLN) && \
-    DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
-    make clean && cp -R /sfcgal/* /
+    cmake -DCMAKE_BUILD_TYPE=Release -GNinja . && ninja -j $(getconf _NPROCESSORS_ONLN) && \
+    DESTDIR=/sfcgal ninja install -j $(getconf _NPROCESSORS_ONLN) && \
+    ninja clean && cp -R /sfcgal/* /
 
 ENV PATH="/usr/local/pgsql/bin:$PATH"
 
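The same make-to-ninja switch recurs for pgrouting, h3, and rdkit below: pass `-GNinja` so CMake emits a build.ninja file, then drive the build and staged install with ninja. A generic sketch with placeholder paths:

    cmake -GNinja -DCMAKE_BUILD_TYPE=Release /path/to/src   # generate build.ninja
    ninja -j "$(getconf _NPROCESSORS_ONLN)"                 # parallel build
    DESTDIR=/staging ninja install                          # staged install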
@@ -213,9 +217,9 @@ RUN case "${PG_VERSION}" in \
     echo "${PGROUTING_CHECKSUM} pgrouting.tar.gz" | sha256sum --check && \
     mkdir pgrouting-src && cd pgrouting-src && tar xzf ../pgrouting.tar.gz --strip-components=1 -C . && \
     mkdir build && cd build && \
-    cmake -DCMAKE_BUILD_TYPE=Release .. && \
-    make -j $(getconf _NPROCESSORS_ONLN) && \
-    make -j $(getconf _NPROCESSORS_ONLN) install && \
+    cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. && \
+    ninja -j $(getconf _NPROCESSORS_ONLN) && \
+    ninja -j $(getconf _NPROCESSORS_ONLN) install && \
     echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control && \
     find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\
     cp /usr/local/pgsql/share/extension/pgrouting.control /extensions/postgis && \
@@ -235,7 +239,9 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY compute/patches/plv8-3.1.10.patch /plv8-3.1.10.patch
 
 RUN apt update && \
-    apt install --no-install-recommends -y ninja-build python3-dev libncurses5 binutils clang
+    apt install --no-install-recommends --no-install-suggests -y \
+    ninja-build python3-dev libncurses5 binutils clang \
+    && apt clean && rm -rf /var/lib/apt/lists/*
 
 # plv8 3.2.3 supports v17
 # last release v3.2.3 - Sep 7, 2024
@@ -301,9 +307,10 @@ RUN mkdir -p /h3/usr/ && \
     echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \
     mkdir h3-src && cd h3-src && tar xzf ../h3.tar.gz --strip-components=1 -C . && \
     mkdir build && cd build && \
-    cmake .. -DCMAKE_BUILD_TYPE=Release && \
-    make -j $(getconf _NPROCESSORS_ONLN) && \
-    DESTDIR=/h3 make install && \
+    cmake .. -GNinja -DBUILD_BENCHMARKS=0 -DCMAKE_BUILD_TYPE=Release \
+    -DBUILD_FUZZERS=0 -DBUILD_FILTERS=0 -DBUILD_GENERATORS=0 -DBUILD_TESTING=0 \
+    && ninja -j $(getconf _NPROCESSORS_ONLN) && \
+    DESTDIR=/h3 ninja install && \
     cp -R /h3/usr / && \
     rm -rf build
 
@@ -650,14 +657,15 @@ FROM build-deps AS rdkit-pg-build
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 
-RUN apt-get update && \
-    apt-get install --no-install-recommends -y \
+RUN apt update && \
+    apt install --no-install-recommends --no-install-suggests -y \
         libboost-iostreams1.74-dev \
         libboost-regex1.74-dev \
         libboost-serialization1.74-dev \
         libboost-system1.74-dev \
         libeigen3-dev \
-        libboost-all-dev
+        libboost-all-dev \
+    && apt clean && rm -rf /var/lib/apt/lists/*
 
 # rdkit Release_2024_09_1 supports v17
 # last release Release_2024_09_1 - Sep 27, 2024
@@ -693,6 +701,8 @@ RUN case "${PG_VERSION}" in \
     -D RDK_BUILD_MOLINTERCHANGE_SUPPORT=OFF \
     -D RDK_BUILD_YAEHMOP_SUPPORT=OFF \
     -D RDK_BUILD_STRUCTCHECKER_SUPPORT=OFF \
+    -D RDK_TEST_MULTITHREADED=OFF \
+    -D RDK_BUILD_CPP_TESTS=OFF \
     -D RDK_USE_URF=OFF \
     -D RDK_BUILD_PGSQL=ON \
     -D RDK_PGSQL_STATIC=ON \
@@ -704,9 +714,10 @@ RUN case "${PG_VERSION}" in \
     -D RDK_INSTALL_COMIC_FONTS=OFF \
     -D RDK_BUILD_FREETYPE_SUPPORT=OFF \
     -D CMAKE_BUILD_TYPE=Release \
+    -GNinja \
     . && \
-    make -j $(getconf _NPROCESSORS_ONLN) && \
-    make -j $(getconf _NPROCESSORS_ONLN) install && \
+    ninja -j $(getconf _NPROCESSORS_ONLN) && \
+    ninja -j $(getconf _NPROCESSORS_ONLN) install && \
     echo 'trusted = true' >> /usr/local/pgsql/share/extension/rdkit.control
 
 #########################################################################################
@@ -849,8 +860,9 @@ FROM build-deps AS rust-extensions-build
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 
-RUN apt-get update && \
-    apt-get install --no-install-recommends -y curl libclang-dev && \
+RUN apt update && \
+    apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
+    apt clean && rm -rf /var/lib/apt/lists/* && \
     useradd -ms /bin/bash nonroot -b /home
 
 ENV HOME=/home/nonroot
@@ -885,8 +897,9 @@ FROM build-deps AS rust-extensions-build-pgrx12
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 
-RUN apt-get update && \
-    apt-get install --no-install-recommends -y curl libclang-dev && \
+RUN apt update && \
+    apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
+    apt clean && rm -rf /var/lib/apt/lists/* && \
     useradd -ms /bin/bash nonroot -b /home
 
 ENV HOME=/home/nonroot
@@ -914,18 +927,22 @@ FROM rust-extensions-build-pgrx12 AS pg-onnx-build
 
 # cmake 3.26 or higher is required, so installing it using pip (bullseye-backports has cmake 3.25).
 # Install it using virtual environment, because Python 3.11 (the default version on Debian 12 (Bookworm)) complains otherwise
-RUN apt-get update && apt-get install -y python3 python3-pip python3-venv && \
+RUN apt update && apt install --no-install-recommends --no-install-suggests -y \
+    python3 python3-pip python3-venv && \
+    apt clean && rm -rf /var/lib/apt/lists/* && \
     python3 -m venv venv && \
     . venv/bin/activate && \
     python3 -m pip install cmake==3.30.5 && \
     wget https://github.com/microsoft/onnxruntime/archive/refs/tags/v1.18.1.tar.gz -O onnxruntime.tar.gz && \
     mkdir onnxruntime-src && cd onnxruntime-src && tar xzf ../onnxruntime.tar.gz --strip-components=1 -C . && \
-    ./build.sh --config Release --parallel --skip_submodule_sync --skip_tests --allow_running_as_root
+    ./build.sh --config Release --parallel --cmake_generator Ninja \
+    --skip_submodule_sync --skip_tests --allow_running_as_root
 
 
 FROM pg-onnx-build AS pgrag-pg-build
 
-RUN apt-get install -y protobuf-compiler && \
+RUN apt update && apt install --no-install-recommends --no-install-suggests -y protobuf-compiler \
+    && apt clean && rm -rf /var/lib/apt/lists/* && \
     wget https://github.com/neondatabase-labs/pgrag/archive/refs/tags/v0.0.0.tar.gz -O pgrag.tar.gz && \
     echo "2cbe394c1e74fc8bcad9b52d5fbbfb783aef834ca3ce44626cfd770573700bb4 pgrag.tar.gz" | sha256sum --check && \
     mkdir pgrag-src && cd pgrag-src && tar xzf ../pgrag.tar.gz --strip-components=1 -C . && \
@@ -1168,6 +1185,25 @@ RUN case "${PG_VERSION}" in \
     make BUILD_TYPE=release -j $(getconf _NPROCESSORS_ONLN) install && \
     echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_mooncake.control
 
+#########################################################################################
+#
+# Layer "pg_repack"
+# compile pg_repack extension
+#
+#########################################################################################
+
+FROM build-deps AS pg-repack-build
+ARG PG_VERSION
+COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
+
+ENV PATH="/usr/local/pgsql/bin/:$PATH"
+
+RUN wget https://github.com/reorg/pg_repack/archive/refs/tags/ver_1.5.2.tar.gz -O pg_repack.tar.gz && \
+    echo '4516cad42251ed3ad53ff619733004db47d5755acac83f75924cd94d1c4fb681 pg_repack.tar.gz' | sha256sum --check && \
+    mkdir pg_repack-src && cd pg_repack-src && tar xzf ../pg_repack.tar.gz --strip-components=1 -C . && \
+    make -j $(getconf _NPROCESSORS_ONLN) && \
+    make -j $(getconf _NPROCESSORS_ONLN) install
+
 #########################################################################################
 #
 # Layer "neon-pg-ext-build"
@@ -1213,6 +1249,7 @@ COPY --from=pg-anon-pg-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY --from=pg-ivm-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY --from=pg-partman-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY --from=pg-mooncake-build /usr/local/pgsql/ /usr/local/pgsql/
+COPY --from=pg-repack-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY pgxn/ pgxn/
 
 RUN make -j $(getconf _NPROCESSORS_ONLN) \
@@ -1248,7 +1285,7 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) \
 
 #########################################################################################
 #
-# Compile and run the Neon-specific `compute_ctl` and `fast_import` binaries
+# Compile the Neon-specific `compute_ctl`, `fast_import`, and `local_proxy` binaries
 #
 #########################################################################################
 FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
@@ -1258,7 +1295,7 @@ ENV BUILD_TAG=$BUILD_TAG
 USER nonroot
 # Copy entire project to get Cargo.* files with proper dependencies for the whole project
 COPY --chown=nonroot . .
-RUN cd compute_tools && mold -run cargo build --locked --profile release-line-debug-size-lto
+RUN mold -run cargo build --locked --profile release-line-debug-size-lto --bin compute_ctl --bin fast_import --bin local_proxy
 
 #########################################################################################
 #
@@ -1279,8 +1316,8 @@ COPY --from=compute-tools /home/nonroot/target/release-line-debug-size-lto/fast_
 
 FROM debian:$DEBIAN_FLAVOR AS pgbouncer
 RUN set -e \
-    && apt-get update \
-    && apt-get install --no-install-recommends -y \
+    && apt update \
+    && apt install --no-install-suggests --no-install-recommends -y \
        build-essential \
        git \
        ca-certificates \
@@ -1288,7 +1325,8 @@ RUN set -e \
        automake \
        libevent-dev \
        libtool \
-       pkg-config
+       pkg-config \
+    && apt clean && rm -rf /var/lib/apt/lists/*
 
 # Use `dist_man_MANS=` to skip manpage generation (which requires python3/pandoc)
 ENV PGBOUNCER_TAG=pgbouncer_1_22_1
@@ -1300,20 +1338,6 @@ RUN set -e \
     && make -j $(nproc) dist_man_MANS= \
     && make install dist_man_MANS=
 
-#########################################################################################
-#
-# Compile the Neon-specific `local_proxy` binary
-#
-#########################################################################################
-FROM $REPOSITORY/$IMAGE:$TAG AS local_proxy
-ARG BUILD_TAG
-ENV BUILD_TAG=$BUILD_TAG
-
-USER nonroot
-# Copy entire project to get Cargo.* files with proper dependencies for the whole project
-COPY --chown=nonroot . .
-RUN mold -run cargo build --locked --profile release-line-debug-size-lto --bin local_proxy
-
 #########################################################################################
 #
 # Layers "postgres-exporter" and "sql-exporter"
@@ -1324,7 +1348,7 @@ FROM quay.io/prometheuscommunity/postgres-exporter:v0.12.1 AS postgres-exporter
 
 # Keep the version the same as in build-tools.Dockerfile and
 # test_runner/regress/test_compute_metrics.py.
-FROM burningalchemist/sql_exporter:0.13.1 AS sql-exporter
+FROM burningalchemist/sql_exporter:0.16.0 AS sql-exporter
 
 #########################################################################################
 #
@@ -1453,7 +1477,7 @@ COPY --from=pgbouncer /usr/local/pgbouncer/bin/pgbouncer /usr/local/bin/
 COPY --chmod=0666 --chown=postgres compute/etc/pgbouncer.ini /etc/pgbouncer.ini
 
 # local_proxy and its config
-COPY --from=local_proxy --chown=postgres /home/nonroot/target/release-line-debug-size-lto/local_proxy /usr/local/bin/local_proxy
+COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/local_proxy /usr/local/bin/local_proxy
 RUN mkdir -p /etc/local_proxy && chown postgres:postgres /etc/local_proxy
 
 # Metrics exporter binaries and configuration files
@@ -1518,28 +1542,30 @@ RUN apt update && \
        locales \
        procps \
        ca-certificates \
+       curl \
+       unzip \
        $VERSION_INSTALLS && \
-    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
+    apt clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
     localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
 
-# s5cmd 2.2.2 from https://github.com/peak/s5cmd/releases/tag/v2.2.2
-# used by fast_import
+# aws cli is used by fast_import (curl and unzip above are at this time only used for this installation step)
 ARG TARGETARCH
-ADD https://github.com/peak/s5cmd/releases/download/v2.2.2/s5cmd_2.2.2_linux_$TARGETARCH.deb /tmp/s5cmd.deb
 RUN set -ex; \
-    \
-    # Determine the expected checksum based on TARGETARCH
     if [ "${TARGETARCH}" = "amd64" ]; then \
-        CHECKSUM="392c385320cd5ffa435759a95af77c215553d967e4b1c0fffe52e4f14c29cf85"; \
+        TARGETARCH_ALT="x86_64"; \
+        CHECKSUM="c9a9df3770a3ff9259cb469b6179e02829687a464e0824d5c32d378820b53a00"; \
     elif [ "${TARGETARCH}" = "arm64" ]; then \
-        CHECKSUM="939bee3cf4b5604ddb00e67f8c157b91d7c7a5b553d1fbb6890fad32894b7b46"; \
+        TARGETARCH_ALT="aarch64"; \
+        CHECKSUM="8181730be7891582b38b028112e81b4899ca817e8c616aad807c9e9d1289223a"; \
     else \
         echo "Unsupported architecture: ${TARGETARCH}"; exit 1; \
     fi; \
-    \
-    # Compute and validate the checksum
-    echo "${CHECKSUM} /tmp/s5cmd.deb" | sha256sum -c -
-RUN dpkg -i /tmp/s5cmd.deb && rm /tmp/s5cmd.deb
+    curl -L "https://awscli.amazonaws.com/awscli-exe-linux-${TARGETARCH_ALT}-2.17.5.zip" -o /tmp/awscliv2.zip; \
+    echo "${CHECKSUM} /tmp/awscliv2.zip" | sha256sum -c -; \
+    unzip /tmp/awscliv2.zip -d /tmp/awscliv2; \
+    /tmp/awscliv2/aws/install; \
+    rm -rf /tmp/awscliv2.zip /tmp/awscliv2; \
+    true
 
 ENV LANG=en_US.utf8
 USER postgres
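The replacement keeps the verify-before-install discipline of the old s5cmd recipe: download, check against a pinned SHA-256, then install. A standalone sketch using the amd64 values from the hunk above; the sudo call is an assumption for a non-root host shell:

    set -e
    TARGETARCH_ALT="x86_64"
    CHECKSUM="c9a9df3770a3ff9259cb469b6179e02829687a464e0824d5c32d378820b53a00"
    curl -L "https://awscli.amazonaws.com/awscli-exe-linux-${TARGETARCH_ALT}-2.17.5.zip" -o /tmp/awscliv2.zip
    echo "${CHECKSUM} /tmp/awscliv2.zip" | sha256sum -c -   # aborts here on mismatch (set -e)
    unzip -q /tmp/awscliv2.zip -d /tmp/awscliv2
    sudo /tmp/awscliv2/aws/install
    rm -rf /tmp/awscliv2.zip /tmp/awscliv2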
@@ -3,7 +3,7 @@
   metrics: [
     import 'sql_exporter/checkpoints_req.libsonnet',
     import 'sql_exporter/checkpoints_timed.libsonnet',
-    import 'sql_exporter/compute_backpressure_throttling_seconds.libsonnet',
+    import 'sql_exporter/compute_backpressure_throttling_seconds_total.libsonnet',
     import 'sql_exporter/compute_current_lsn.libsonnet',
     import 'sql_exporter/compute_logical_snapshot_files.libsonnet',
     import 'sql_exporter/compute_logical_snapshots_bytes.libsonnet',
@@ -19,3 +19,10 @@ max_prepared_statements=0
 admin_users=postgres
 unix_socket_dir=/tmp/
 unix_socket_mode=0777
+
+;; Disable connection logging. It produces a lot of logs that no one looks at,
+;; and we can get similar log entries from the proxy too. We had incidents in
+;; the past where the logging significantly stressed the log device or pgbouncer
+;; itself.
+log_connections=0
+log_disconnections=0
@@ -1,10 +1,10 @@
 {
-  metric_name: 'compute_backpressure_throttling_seconds',
-  type: 'gauge',
+  metric_name: 'compute_backpressure_throttling_seconds_total',
+  type: 'counter',
   help: 'Time compute has spent throttled',
   key_labels: null,
   values: [
     'throttled',
   ],
-  query: importstr 'sql_exporter/compute_backpressure_throttling_seconds.sql',
+  query: importstr 'sql_exporter/compute_backpressure_throttling_seconds_total.sql',
 }
@@ -981,7 +981,7 @@ index fc42d418bf..e38f517574 100644
  CREATE SCHEMA addr_nsp;
  SET search_path TO 'addr_nsp';
 diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out
-index 8475231735..1afae5395f 100644
+index 8475231735..0653946337 100644
 --- a/src/test/regress/expected/password.out
 +++ b/src/test/regress/expected/password.out
 @@ -12,11 +12,11 @@ SET password_encryption = 'md5'; -- ok
@@ -1006,65 +1006,63 @@ index 8475231735..1afae5395f 100644
  -----------------+---------------------------------------------------
 - regress_passwd1 | md5783277baca28003b33453252be4dbb34
 - regress_passwd2 | md54044304ba511dd062133eb5b4b84a2a3
-+ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
-+ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
++ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
++ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
   regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
 - regress_passwd4 |
 + regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
  (4 rows)
 
  -- Rename a role
-@@ -54,24 +54,30 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
+@@ -54,24 +54,16 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
  -- passwords.
  SET password_encryption = 'md5';
  -- encrypt with MD5
 -ALTER ROLE regress_passwd2 PASSWORD 'foo';
+--- already encrypted, use as they are
+-ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
+-ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
 +ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
--- already encrypted, use as they are
-ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
-+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
-+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
  SET password_encryption = 'scram-sha-256';
  -- create SCRAM secret
 -ALTER ROLE regress_passwd4 PASSWORD 'foo';
+--- already encrypted with MD5, use as it is
+-CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
+--- This looks like a valid SCRAM-SHA-256 secret, but it is not
+--- so it should be hashed with SCRAM-SHA-256.
+-CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
+--- These may look like valid MD5 secrets, but they are not, so they
+--- should be hashed with SCRAM-SHA-256.
+--- trailing garbage at the end
+-CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
+--- invalid length
+-CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
 +ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
--- already encrypted with MD5, use as it is
-CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
-+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
--- This looks like a valid SCRAM-SHA-256 secret, but it is not
--- so it should be hashed with SCRAM-SHA-256.
-CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
-+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
--- These may look like valid MD5 secrets, but they are not, so they
--- should be hashed with SCRAM-SHA-256.
--- trailing garbage at the end
-CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
-+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
--- invalid length
-CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
-+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
++-- Neon does not support encrypted passwords, use unencrypted instead
++CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++-- Neon does not support encrypted passwords, use unencrypted instead
++CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
  -- Changing the SCRAM iteration count
  SET scram_iterations = 1024;
  CREATE ROLE regress_passwd9 PASSWORD 'alterediterationcount';
-@@ -81,63 +87,67 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
+@@ -81,11 +73,11 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
  ORDER BY rolname, rolpassword;
   rolname | rolpassword_masked
  -----------------+---------------------------------------------------
 - regress_passwd1 | md5cd3578025fe2c3d7ed1b9a9b26238b70
 - regress_passwd2 | md5dfa155cadd5f4ad57860162f3fab9cdb
-+ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
-+ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
++ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
++ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
   regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
   regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
  - regress_passwd5 | md5e73a4b11df52a6068f8b39f90be36023
-- regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
-- regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
-- regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd9 | SCRAM-SHA-256$1024:<salt>$<storedkey>:<serverkey>
--(9 rows)
-+(5 rows)
++ regress_passwd5 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+ regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+ regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+ regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+@@ -95,23 +87,20 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
 
  -- An empty password is not allowed, in any form
  CREATE ROLE regress_passwd_empty PASSWORD '';
  NOTICE: empty string is not a valid password, clearing password
|
|||||||
-(1 row)
|
-(1 row)
|
||||||
+(0 rows)
|
+(0 rows)
|
||||||
|
|
||||||
-- Test with invalid stored and server keys.
|
--- Test with invalid stored and server keys.
|
||||||
--
|
---
|
||||||
-- The first is valid, to act as a control. The others have too long
|
--- The first is valid, to act as a control. The others have too long
|
||||||
-- stored/server keys. They will be re-hashed.
|
--- stored/server keys. They will be re-hashed.
|
||||||
CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
-CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
||||||
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
-CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
||||||
CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
-CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
|
||||||
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
+-- Neon does not support encrypted passwords, use unencrypted instead
|
||||||
CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
|
+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
||||||
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
||||||
|
+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
||||||
-- Check that the invalid secrets were re-hashed. A re-hashed secret
|
-- Check that the invalid secrets were re-hashed. A re-hashed secret
|
||||||
-- should not contain the original salt.
|
-- should not contain the original salt.
|
||||||
SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed
|
SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed
|
||||||
FROM pg_authid
|
@@ -120,7 +109,7 @@ SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassw
|
||||||
WHERE rolname LIKE 'regress_passwd_sha_len%'
|
|
||||||
ORDER BY rolname;
|
ORDER BY rolname;
|
||||||
- rolname | is_rolpassword_rehashed
|
rolname | is_rolpassword_rehashed
|
||||||
--------------------------+-------------------------
|
-------------------------+-------------------------
|
||||||
- regress_passwd_sha_len0 | f
|
- regress_passwd_sha_len0 | f
|
||||||
- regress_passwd_sha_len1 | t
|
+ regress_passwd_sha_len0 | t
|
||||||
- regress_passwd_sha_len2 | t
|
regress_passwd_sha_len1 | t
|
||||||
-(3 rows)
|
regress_passwd_sha_len2 | t
|
||||||
+ rolname | is_rolpassword_rehashed
|
(3 rows)
|
||||||
+---------+-------------------------
|
@@ -135,6 +124,7 @@ DROP ROLE regress_passwd7;
|
||||||
+(0 rows)
|
|
||||||
|
|
||||||
DROP ROLE regress_passwd1;
|
|
||||||
DROP ROLE regress_passwd2;
|
|
||||||
DROP ROLE regress_passwd3;
|
|
||||||
DROP ROLE regress_passwd4;
|
|
||||||
DROP ROLE regress_passwd5;
|
|
||||||
+ERROR: role "regress_passwd5" does not exist
|
|
||||||
DROP ROLE regress_passwd6;
|
|
||||||
+ERROR: role "regress_passwd6" does not exist
|
|
||||||
DROP ROLE regress_passwd7;
|
|
||||||
+ERROR: role "regress_passwd7" does not exist
|
|
||||||
DROP ROLE regress_passwd8;
|
DROP ROLE regress_passwd8;
|
||||||
+ERROR: role "regress_passwd8" does not exist
|
|
||||||
DROP ROLE regress_passwd9;
|
DROP ROLE regress_passwd9;
|
||||||
DROP ROLE regress_passwd_empty;
|
DROP ROLE regress_passwd_empty;
|
||||||
+ERROR: role "regress_passwd_empty" does not exist
|
+ERROR: role "regress_passwd_empty" does not exist
|
||||||
DROP ROLE regress_passwd_sha_len0;
|
DROP ROLE regress_passwd_sha_len0;
|
||||||
+ERROR: role "regress_passwd_sha_len0" does not exist
|
|
||||||
DROP ROLE regress_passwd_sha_len1;
|
DROP ROLE regress_passwd_sha_len1;
|
||||||
+ERROR: role "regress_passwd_sha_len1" does not exist
|
|
||||||
DROP ROLE regress_passwd_sha_len2;
|
DROP ROLE regress_passwd_sha_len2;
|
||||||
+ERROR: role "regress_passwd_sha_len2" does not exist
|
|
||||||
-- all entries should have been removed
|
|
||||||
SELECT rolname, rolpassword
|
|
||||||
FROM pg_authid
|
|
||||||
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
|
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
|
||||||
index 5b9dba7b32..cc408dad42 100644
|
index 5b9dba7b32..cc408dad42 100644
|
||||||
--- a/src/test/regress/expected/privileges.out
|
--- a/src/test/regress/expected/privileges.out
|
||||||
@@ -3194,7 +3173,7 @@ index 1a6c61f49d..1c31ac6a53 100644
|
|||||||
-- Test generic object addressing/identification functions
|
-- Test generic object addressing/identification functions
|
||||||
CREATE SCHEMA addr_nsp;
|
CREATE SCHEMA addr_nsp;
|
||||||
diff --git a/src/test/regress/sql/password.sql b/src/test/regress/sql/password.sql
index 53e86b0b6c..0303fdfe96 100644
--- a/src/test/regress/sql/password.sql
+++ b/src/test/regress/sql/password.sql
@@ -10,11 +10,11 @@ SET password_encryption = 'scram-sha-256'; -- ok
 -- check list of created entries
 --
@@ -42,26 +42,18 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
 SET password_encryption = 'md5';
 -- encrypt with MD5
-ALTER ROLE regress_passwd2 PASSWORD 'foo';
--- already encrypted, use as they are
-ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
-ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
+ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
 SET password_encryption = 'scram-sha-256';
 -- create SCRAM secret
-ALTER ROLE regress_passwd4 PASSWORD 'foo';
--- already encrypted with MD5, use as it is
-CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
+ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
--- This looks like a valid SCRAM-SHA-256 secret, but it is not
--- so it should be hashed with SCRAM-SHA-256.
-CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
--- These may look like valid MD5 secrets, but they are not, so they
--- should be hashed with SCRAM-SHA-256.
--- trailing garbage at the end
-CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
--- invalid length
-CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
 -- Changing the SCRAM iteration count
 SET scram_iterations = 1024;
@@ -78,13 +70,10 @@ ALTER ROLE regress_passwd_empty PASSWORD 'md585939a5ce845f1a1b620742e3c659e0a';
 ALTER ROLE regress_passwd_empty PASSWORD 'SCRAM-SHA-256$4096:hpFyHTUsSWcR7O9P$LgZFIt6Oqdo27ZFKbZ2nV+vtnYM995pDh9ca6WSi120=:qVV5NeluNfUPkwm7Vqat25RjSPLkGeoZBQs6wVv+um4=';
 SELECT rolpassword FROM pg_authid WHERE rolname='regress_passwd_empty';
--- Test with invalid stored and server keys.
---
--- The first is valid, to act as a control. The others have too long
--- stored/server keys. They will be re-hashed.
-CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
 -- Check that the invalid secrets were re-hashed. A re-hashed secret
 -- should not contain the original salt.
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index 249df17a58..b258e7f26a 100644
--- a/src/test/regress/sql/privileges.sql
+++ b/src/test/regress/sql/privileges.sql
compute/patches/cloud_regress_pg17.patch (new file, 4058 lines): diff suppressed because it is too large.
@@ -7,7 +7,7 @@ license.workspace = true
 [features]
 default = []
 # Enables test specific features.
-testing = []
+testing = ["fail/failpoints"]

 [dependencies]
 base64.workspace = true
@@ -19,6 +19,7 @@ camino.workspace = true
 chrono.workspace = true
 cfg-if.workspace = true
 clap.workspace = true
+fail.workspace = true
 flate2.workspace = true
 futures.workspace = true
 hyper0 = { workspace = true, features = ["full"] }
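Enabling `fail/failpoints` in the `testing` feature wires the fail crate's failpoint machinery into test builds. A minimal sketch of how a failpoint guards a code path once that feature is on (the function name here is illustrative, not from the repo):

    use fail::fail_point;

    fn flush_to_disk() -> anyhow::Result<()> {
        // Returns early with an error when the failpoint is armed,
        // e.g. via FAILPOINTS="compute-flush=return" at process startup.
        fail_point!("compute-flush", |_| {
            Err(anyhow::anyhow!("failpoint compute-flush triggered"))
        });
        // ... normal flush logic would go here ...
        Ok(())
    }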
@@ -67,12 +67,15 @@ use compute_tools::params::*;
 use compute_tools::spec::*;
 use compute_tools::swap::resize_swap;
 use rlimit::{setrlimit, Resource};
+use utils::failpoint_support;

 // this is an arbitrary build tag. Fine as a default / for testing purposes
 // in-case of not-set environment var
 const BUILD_TAG_DEFAULT: &str = "latest";

 fn main() -> Result<()> {
+    let scenario = failpoint_support::init();
+
     let (build_tag, clap_args) = init()?;

     // enable core dumping for all child processes
@@ -100,6 +103,8 @@ fn main() -> Result<()> {

     maybe_delay_exit(delay_exit);

+    scenario.teardown();
+
     deinit_and_exit(wait_pg_result);
 }
@@ -246,47 +251,48 @@ fn try_spec_from_cli(
     let compute_id = matches.get_one::<String>("compute-id");
     let control_plane_uri = matches.get_one::<String>("control-plane-uri");

-    let spec;
-    let mut live_config_allowed = false;
-    match spec_json {
-        // First, try to get cluster spec from the cli argument
-        Some(json) => {
-            info!("got spec from cli argument {}", json);
-            spec = Some(serde_json::from_str(json)?);
-        }
-        None => {
-            // Second, try to read it from the file if path is provided
-            if let Some(sp) = spec_path {
-                let path = Path::new(sp);
-                let file = File::open(path)?;
-                spec = Some(serde_json::from_reader(file)?);
-                live_config_allowed = true;
-            } else if let Some(id) = compute_id {
-                if let Some(cp_base) = control_plane_uri {
-                    live_config_allowed = true;
-                    spec = match get_spec_from_control_plane(cp_base, id) {
-                        Ok(s) => s,
-                        Err(e) => {
-                            error!("cannot get response from control plane: {}", e);
-                            panic!("neither spec nor confirmation that compute is in the Empty state was received");
-                        }
-                    };
-                } else {
-                    panic!("must specify both --control-plane-uri and --compute-id or none");
-                }
-            } else {
-                panic!(
-                    "compute spec should be provided by one of the following ways: \
-                    --spec OR --spec-path OR --control-plane-uri and --compute-id"
-                );
-            }
-        }
-    };
-
-    Ok(CliSpecParams {
-        spec,
-        live_config_allowed,
-    })
+    // First, try to get cluster spec from the cli argument
+    if let Some(spec_json) = spec_json {
+        info!("got spec from cli argument {}", spec_json);
+        return Ok(CliSpecParams {
+            spec: Some(serde_json::from_str(spec_json)?),
+            live_config_allowed: false,
+        });
+    }
+
+    // Second, try to read it from the file if path is provided
+    if let Some(spec_path) = spec_path {
+        let file = File::open(Path::new(spec_path))?;
+        return Ok(CliSpecParams {
+            spec: Some(serde_json::from_reader(file)?),
+            live_config_allowed: true,
+        });
+    }
+
+    let Some(compute_id) = compute_id else {
+        panic!(
+            "compute spec should be provided by one of the following ways: \
+            --spec OR --spec-path OR --control-plane-uri and --compute-id"
+        );
+    };
+    let Some(control_plane_uri) = control_plane_uri else {
+        panic!("must specify both --control-plane-uri and --compute-id or none");
+    };
+
+    match get_spec_from_control_plane(control_plane_uri, compute_id) {
+        Ok(spec) => Ok(CliSpecParams {
+            spec,
+            live_config_allowed: true,
+        }),
+        Err(e) => {
+            error!(
+                "cannot get response from control plane: {}\n\
+                neither spec nor confirmation that compute is in the Empty state was received",
+                e
+            );
+            Err(e)
+        }
+    }
 }

 struct CliSpecParams {
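The refactor above flattens nested conditionals into early returns plus let-else guards. A standalone sketch of the same pattern (names illustrative, not the repo's API):

    // Validate mutually dependent CLI options with let-else guards.
    fn resolve(uri: Option<&str>, id: Option<&str>) -> (String, String) {
        let Some(id) = id else {
            panic!("spec must come from --spec, --spec-path, or --control-plane-uri with --compute-id");
        };
        let Some(uri) = uri else {
            panic!("must specify both --control-plane-uri and --compute-id or none");
        };
        (uri.to_string(), id.to_string())
    }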
@@ -418,9 +424,14 @@ fn start_postgres(
         "running compute with features: {:?}",
         state.pspec.as_ref().unwrap().spec.features
     );
-    // before we release the mutex, fetch the swap size (if any) for later.
-    let swap_size_bytes = state.pspec.as_ref().unwrap().spec.swap_size_bytes;
-    let disk_quota_bytes = state.pspec.as_ref().unwrap().spec.disk_quota_bytes;
+    // before we release the mutex, fetch some parameters for later.
+    let &ComputeSpec {
+        swap_size_bytes,
+        disk_quota_bytes,
+        #[cfg(target_os = "linux")]
+        disable_lfc_resizing,
+        ..
+    } = &state.pspec.as_ref().unwrap().spec;
     drop(state);

     // Launch remaining service threads
@@ -525,11 +536,18 @@ fn start_postgres(
     // This token is used internally by the monitor to clean up all threads
     let token = CancellationToken::new();

+    // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
+    let pgconnstr = if disable_lfc_resizing.unwrap_or(false) {
+        None
+    } else {
+        file_cache_connstr.cloned()
+    };
+
     let vm_monitor = rt.as_ref().map(|rt| {
         rt.spawn(vm_monitor::start(
             Box::leak(Box::new(vm_monitor::Args {
                 cgroup: cgroup.cloned(),
-                pgconnstr: file_cache_connstr.cloned(),
+                pgconnstr,
                 addr: vm_monitor_addr.clone(),
             })),
             token.clone(),
@@ -34,12 +34,12 @@ use nix::unistd::Pid;
 use tracing::{info, info_span, warn, Instrument};
 use utils::fs_ext::is_directory_empty;

+#[path = "fast_import/aws_s3_sync.rs"]
+mod aws_s3_sync;
 #[path = "fast_import/child_stdio_to_log.rs"]
 mod child_stdio_to_log;
 #[path = "fast_import/s3_uri.rs"]
 mod s3_uri;
-#[path = "fast_import/s5cmd.rs"]
-mod s5cmd;

 #[derive(clap::Parser)]
 struct Args {
@@ -326,7 +326,7 @@ pub(crate) async fn main() -> anyhow::Result<()> {
     }

     info!("upload pgdata");
-    s5cmd::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/"))
+    aws_s3_sync::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/pgdata/"))
         .await
         .context("sync dump directory to destination")?;

@@ -334,10 +334,10 @@ pub(crate) async fn main() -> anyhow::Result<()> {
     {
         let status_dir = working_directory.join("status");
         std::fs::create_dir(&status_dir).context("create status directory")?;
-        let status_file = status_dir.join("status");
+        let status_file = status_dir.join("pgdata");
         std::fs::write(&status_file, serde_json::json!({"done": true}).to_string())
             .context("write status file")?;
-        s5cmd::sync(&status_file, &s3_prefix.append("/status/pgdata"))
+        aws_s3_sync::sync(&status_dir, &s3_prefix.append("/status/"))
             .await
             .context("sync status directory to destination")?;
     }
@@ -4,24 +4,21 @@ use camino::Utf8Path;
 use super::s3_uri::S3Uri;

 pub(crate) async fn sync(local: &Utf8Path, remote: &S3Uri) -> anyhow::Result<()> {
-    let mut builder = tokio::process::Command::new("s5cmd");
-    // s5cmd uses aws-sdk-go v1, hence doesn't support AWS_ENDPOINT_URL
-    if let Some(val) = std::env::var_os("AWS_ENDPOINT_URL") {
-        builder.arg("--endpoint-url").arg(val);
-    }
+    let mut builder = tokio::process::Command::new("aws");
     builder
+        .arg("s3")
         .arg("sync")
         .arg(local.as_str())
         .arg(remote.to_string());
     let st = builder
         .spawn()
-        .context("spawn s5cmd")?
+        .context("spawn aws s3 sync")?
         .wait()
         .await
-        .context("wait for s5cmd")?;
+        .context("wait for aws s3 sync")?;
     if st.success() {
         Ok(())
     } else {
-        Err(anyhow::anyhow!("s5cmd failed"))
+        Err(anyhow::anyhow!("aws s3 sync failed"))
     }
 }
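A self-contained sketch of the same shell-out pattern (not the repo's API): the AWS CLI v2 honors AWS_ENDPOINT_URL on its own, which is why the explicit --endpoint-url plumbing that s5cmd needed can be dropped.

    use anyhow::Context;

    async fn sync_dir(local: &str, remote: &str) -> anyhow::Result<()> {
        let status = tokio::process::Command::new("aws")
            .args(["s3", "sync", local, remote])
            .spawn()
            .context("spawn aws s3 sync")?
            .wait()
            .await
            .context("wait for aws s3 sync")?;
        // Treat any non-zero exit code as a hard failure.
        anyhow::ensure!(status.success(), "aws s3 sync failed: {status}");
        Ok(())
    }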
@@ -15,7 +15,7 @@ use std::time::Instant;

 use anyhow::{Context, Result};
 use chrono::{DateTime, Utc};
-use compute_api::spec::{PgIdent, Role};
+use compute_api::spec::{Database, PgIdent, Role};
 use futures::future::join_all;
 use futures::stream::FuturesUnordered;
 use futures::StreamExt;
@@ -45,8 +45,10 @@ use crate::spec_apply::ApplySpecPhase::{
     DropInvalidDatabases, DropRoles, HandleNeonExtension, HandleOtherExtensions,
     RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
 };
+use crate::spec_apply::PerDatabasePhase;
 use crate::spec_apply::PerDatabasePhase::{
-    ChangeSchemaPerms, DeleteDBRoleReferences, HandleAnonExtension,
+    ChangeSchemaPerms, DeleteDBRoleReferences, DropSubscriptionsForDeletedDatabases,
+    HandleAnonExtension,
 };
 use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
 use crate::sync_sk::{check_if_synced, ping_safekeeper};
@@ -834,7 +836,7 @@ impl ComputeNode {
         conf
     }

-    async fn get_maintenance_client(
+    pub async fn get_maintenance_client(
         conf: &tokio_postgres::Config,
     ) -> Result<tokio_postgres::Client> {
         let mut conf = conf.clone();
@@ -943,6 +945,78 @@ impl ComputeNode {
             dbs: databases,
         }));

+        // Apply special pre drop database phase.
+        // NOTE: we use the code of RunInEachDatabase phase for parallelism
+        // and connection management, but we don't really run it in *each* database,
+        // only in databases, we're about to drop.
+        info!("Applying PerDatabase (pre-dropdb) phase");
+        let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
+
+        // Run the phase for each database that we're about to drop.
+        let db_processes = spec
+            .delta_operations
+            .iter()
+            .flatten()
+            .filter_map(move |op| {
+                if op.action.as_str() == "delete_db" {
+                    Some(op.name.clone())
+                } else {
+                    None
+                }
+            })
+            .map(|dbname| {
+                let spec = spec.clone();
+                let ctx = ctx.clone();
+                let jwks_roles = jwks_roles.clone();
+                let mut conf = conf.as_ref().clone();
+                let concurrency_token = concurrency_token.clone();
+                // We only need dbname field for this phase, so set other fields to dummy values
+                let db = DB::UserDB(Database {
+                    name: dbname.clone(),
+                    owner: "cloud_admin".to_string(),
+                    options: None,
+                    restrict_conn: false,
+                    invalid: false,
+                });
+
+                debug!("Applying per-database phases for Database {:?}", &db);
+
+                match &db {
+                    DB::SystemDB => {}
+                    DB::UserDB(db) => {
+                        conf.dbname(db.name.as_str());
+                    }
+                }
+
+                let conf = Arc::new(conf);
+                let fut = Self::apply_spec_sql_db(
+                    spec.clone(),
+                    conf,
+                    ctx.clone(),
+                    jwks_roles.clone(),
+                    concurrency_token.clone(),
+                    db,
+                    [DropSubscriptionsForDeletedDatabases].to_vec(),
+                );
+
+                Ok(spawn(fut))
+            })
+            .collect::<Vec<Result<_, anyhow::Error>>>();
+
+        for process in db_processes.into_iter() {
+            let handle = process?;
+            if let Err(e) = handle.await? {
+                // Handle the error case where the database does not exist
+                // We do not check whether the DB exists or not in the deletion phase,
+                // so we shouldn't be strict about it in pre-deletion cleanup as well.
+                if e.to_string().contains("does not exist") {
+                    warn!("Error dropping subscription: {}", e);
+                } else {
+                    return Err(e);
+                }
+            };
+        }
+
         for phase in [
             CreateSuperUser,
             DropInvalidDatabases,
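An illustrative sketch (names hypothetical) of the concurrency pattern used above: every per-database task is spawned up front, while a Semaphore caps how many run at once.

    use std::sync::Arc;
    use tokio::sync::Semaphore;

    async fn run_per_db(dbs: Vec<String>, concurrency: usize) -> anyhow::Result<()> {
        let semaphore = Arc::new(Semaphore::new(concurrency));
        let handles: Vec<_> = dbs
            .into_iter()
            .map(|db| {
                let semaphore = semaphore.clone();
                tokio::spawn(async move {
                    // Each task waits for a permit before doing real work.
                    let _permit = semaphore.acquire().await?;
                    println!("cleaning up {db}");
                    Ok::<_, anyhow::Error>(())
                })
            })
            .collect();
        for h in handles {
            h.await??;
        }
        Ok(())
    }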
@@ -962,7 +1036,7 @@ impl ComputeNode {
             .await?;
         }

-        info!("Applying RunInEachDatabase phase");
+        info!("Applying RunInEachDatabase2 phase");
         let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));

         let db_processes = spec
@@ -997,6 +1071,12 @@ impl ComputeNode {
                     jwks_roles.clone(),
                     concurrency_token.clone(),
                     db,
+                    [
+                        DeleteDBRoleReferences,
+                        ChangeSchemaPerms,
+                        HandleAnonExtension,
+                    ]
+                    .to_vec(),
                 );

                 Ok(spawn(fut))
@@ -1043,16 +1123,13 @@ impl ComputeNode {
         jwks_roles: Arc<HashSet<String>>,
         concurrency_token: Arc<tokio::sync::Semaphore>,
         db: DB,
+        subphases: Vec<PerDatabasePhase>,
     ) -> Result<()> {
         let _permit = concurrency_token.acquire().await?;

         let mut client_conn = None;

-        for subphase in [
-            DeleteDBRoleReferences,
-            ChangeSchemaPerms,
-            HandleAnonExtension,
-        ] {
+        for subphase in subphases {
             apply_operations(
                 spec.clone(),
                 ctx.clone(),
@@ -1181,8 +1258,19 @@ impl ComputeNode {
         let mut conf = postgres::config::Config::from(conf);
         conf.application_name("compute_ctl:migrations");

-        let mut client = conf.connect(NoTls)?;
-        handle_migrations(&mut client).context("apply_config handle_migrations")
+        match conf.connect(NoTls) {
+            Ok(mut client) => {
+                if let Err(e) = handle_migrations(&mut client) {
+                    error!("Failed to run migrations: {}", e);
+                }
+            }
+            Err(e) => {
+                error!(
+                    "Failed to connect to the compute for running migrations: {}",
+                    e
+                );
+            }
+        };
     });

     Ok::<(), anyhow::Error>(())
@@ -24,8 +24,11 @@ use metrics::proto::MetricFamily;
 use metrics::Encoder;
 use metrics::TextEncoder;
 use tokio::task;
+use tokio_util::sync::CancellationToken;
 use tracing::{debug, error, info, warn};
 use tracing_utils::http::OtelName;
+use utils::failpoint_support::failpoints_handler;
+use utils::http::error::ApiError;
 use utils::http::request::must_get_query_param;

 fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse {
@@ -310,6 +313,18 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
             }
         }

+        (&Method::POST, "/failpoints") if cfg!(feature = "testing") => {
+            match failpoints_handler(req, CancellationToken::new()).await {
+                Ok(r) => r,
+                Err(ApiError::BadRequest(e)) => {
+                    render_json_error(&e.to_string(), StatusCode::BAD_REQUEST)
+                }
+                Err(_) => {
+                    render_json_error("Internal server error", StatusCode::INTERNAL_SERVER_ERROR)
+                }
+            }
+        }
+
         // download extension files from remote extension storage on demand
         (&Method::POST, route) if route.starts_with("/extension_server/") => {
             info!("serving {:?} POST request", route);
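A hedged client-side sketch of poking the testing-only endpoint from an integration test. It assumes reqwest with the `json` feature; the exact wire format is defined by utils::failpoint_support, so treat the JSON payload of name/actions pairs below as an assumption, not a documented API.

    async fn configure_failpoint(base: &str) -> anyhow::Result<()> {
        let client = reqwest::Client::new();
        let resp = client
            .post(format!("{base}/failpoints"))
            .json(&serde_json::json!([{ "name": "compute-migration", "actions": "return(3)" }]))
            .send()
            .await?;
        anyhow::ensure!(resp.status().is_success(), "failpoint request failed");
        Ok(())
    }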
@@ -537,12 +537,14 @@ components:
       properties:
         extname:
           type: string
-        versions:
-          type: array
+        version:
+          type: string
         items:
           type: string
         n_databases:
           type: integer
+        owned_by_superuser:
+          type: integer

     SetRoleGrantsRequest:
       type: object
@@ -1,7 +1,6 @@
 use compute_api::responses::{InstalledExtension, InstalledExtensions};
 use metrics::proto::MetricFamily;
 use std::collections::HashMap;
-use std::collections::HashSet;

 use anyhow::Result;
 use postgres::{Client, NoTls};
@@ -38,61 +37,77 @@ fn list_dbs(client: &mut Client) -> Result<Vec<String>> {
 /// Connect to every database (see list_dbs above) and get the list of installed extensions.
 ///
 /// Same extension can be installed in multiple databases with different versions,
-/// we only keep the highest and lowest version across all databases.
+/// so we report a separate metric (number of databases where it is installed)
+/// for each extension version.
 pub fn get_installed_extensions(mut conf: postgres::config::Config) -> Result<InstalledExtensions> {
     conf.application_name("compute_ctl:get_installed_extensions");
     let mut client = conf.connect(NoTls)?;

     let databases: Vec<String> = list_dbs(&mut client)?;

-    let mut extensions_map: HashMap<String, InstalledExtension> = HashMap::new();
+    let mut extensions_map: HashMap<(String, String, String), InstalledExtension> = HashMap::new();
     for db in databases.iter() {
         conf.dbname(db);
         let mut db_client = conf.connect(NoTls)?;
-        let extensions: Vec<(String, String)> = db_client
+        let extensions: Vec<(String, String, i32)> = db_client
             .query(
-                "SELECT extname, extversion FROM pg_catalog.pg_extension;",
+                "SELECT extname, extversion, extowner::integer FROM pg_catalog.pg_extension",
                 &[],
             )?
             .iter()
-            .map(|row| (row.get("extname"), row.get("extversion")))
+            .map(|row| {
+                (
+                    row.get("extname"),
+                    row.get("extversion"),
+                    row.get("extowner"),
+                )
+            })
             .collect();

-        for (extname, v) in extensions.iter() {
+        for (extname, v, extowner) in extensions.iter() {
             let version = v.to_string();

-            // increment the number of databases where the version of extension is installed
-            INSTALLED_EXTENSIONS
-                .with_label_values(&[extname, &version])
-                .inc();
+            // check if the extension is owned by superuser
+            // 10 is the oid of superuser
+            let owned_by_superuser = if *extowner == 10 { "1" } else { "0" };

             extensions_map
-                .entry(extname.to_string())
+                .entry((
+                    extname.to_string(),
+                    version.clone(),
+                    owned_by_superuser.to_string(),
+                ))
                 .and_modify(|e| {
-                    e.versions.insert(version.clone());
                     // count the number of databases where the extension is installed
                     e.n_databases += 1;
                 })
                 .or_insert(InstalledExtension {
                     extname: extname.to_string(),
-                    versions: HashSet::from([version.clone()]),
+                    version: version.clone(),
                     n_databases: 1,
+                    owned_by_superuser: owned_by_superuser.to_string(),
                 });
         }
     }

-    let res = InstalledExtensions {
-        extensions: extensions_map.into_values().collect(),
-    };
-
-    Ok(res)
+    for (key, ext) in extensions_map.iter() {
+        let (extname, version, owned_by_superuser) = key;
+        let n_databases = ext.n_databases as u64;
+
+        INSTALLED_EXTENSIONS
+            .with_label_values(&[extname, version, owned_by_superuser])
+            .set(n_databases);
+    }
+
+    Ok(InstalledExtensions {
+        extensions: extensions_map.into_values().collect(),
+    })
 }

 static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
     register_uint_gauge_vec!(
         "compute_installed_extensions",
         "Number of databases where the version of extension is installed",
-        &["extension_name", "version"]
+        &["extension_name", "version", "owned_by_superuser"]
     )
     .expect("failed to define a metric")
 });
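A minimal sketch of the gauge-per-label-set pattern used above. The repo uses its own `metrics` wrapper; this sketch assumes the plain prometheus crate instead. Setting the gauge (rather than incrementing it) keeps collection idempotent across repeated scans.

    use once_cell::sync::Lazy;
    use prometheus::{register_int_gauge_vec, IntGaugeVec};

    static INSTALLED: Lazy<IntGaugeVec> = Lazy::new(|| {
        register_int_gauge_vec!(
            "compute_installed_extensions",
            "Number of databases where the version of extension is installed",
            &["extension_name", "version", "owned_by_superuser"]
        )
        .expect("failed to define a metric")
    });

    fn record(extname: &str, version: &str, superuser_owned: bool, n_dbs: i64) {
        INSTALLED
            .with_label_values(&[extname, version, if superuser_owned { "1" } else { "0" }])
            .set(n_dbs);
    }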
@@ -1,13 +1,16 @@
 use anyhow::{Context, Result};
+use fail::fail_point;
 use postgres::Client;
 use tracing::info;

+/// Runs a series of migrations on a target database
 pub(crate) struct MigrationRunner<'m> {
     client: &'m mut Client,
     migrations: &'m [&'m str],
 }

 impl<'m> MigrationRunner<'m> {
+    /// Create a new migration runner
     pub fn new(client: &'m mut Client, migrations: &'m [&'m str]) -> Self {
         // The neon_migration.migration_id::id column is a bigint, which is equivalent to an i64
         assert!(migrations.len() + 1 < i64::MAX as usize);
@@ -15,6 +18,7 @@ impl<'m> MigrationRunner<'m> {
         Self { client, migrations }
     }

+    /// Get the current value of neon_migration.migration_id
     fn get_migration_id(&mut self) -> Result<i64> {
         let query = "SELECT id FROM neon_migration.migration_id";
         let row = self
@@ -25,37 +29,61 @@ impl<'m> MigrationRunner<'m> {
         Ok(row.get::<&str, i64>("id"))
     }

+    /// Update the neon_migration.migration_id value
+    ///
+    /// This function has a fail point called compute-migration, which can be
+    /// used if you would like to fail the application of a series of migrations
+    /// at some point.
     fn update_migration_id(&mut self, migration_id: i64) -> Result<()> {
-        let setval = format!("UPDATE neon_migration.migration_id SET id={}", migration_id);
+        // We use this fail point in order to check that failing in the
+        // middle of applying a series of migrations fails in an expected
+        // manner
+        if cfg!(feature = "testing") {
+            let fail = (|| {
+                fail_point!("compute-migration", |fail_migration_id| {
+                    migration_id == fail_migration_id.unwrap().parse::<i64>().unwrap()
+                });
+
+                false
+            })();
+
+            if fail {
+                return Err(anyhow::anyhow!(format!(
+                    "migration {} was configured to fail because of a failpoint",
+                    migration_id
+                )));
+            }
+        }

         self.client
-            .simple_query(&setval)
+            .query(
+                "UPDATE neon_migration.migration_id SET id = $1",
+                &[&migration_id],
+            )
             .context("run_migrations update id")?;

         Ok(())
     }

-    fn prepare_migrations(&mut self) -> Result<()> {
-        let query = "CREATE SCHEMA IF NOT EXISTS neon_migration";
-        self.client.simple_query(query)?;
-
-        let query = "CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)";
-        self.client.simple_query(query)?;
-
-        let query = "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING";
-        self.client.simple_query(query)?;
-
-        let query = "ALTER SCHEMA neon_migration OWNER TO cloud_admin";
-        self.client.simple_query(query)?;
-
-        let query = "REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC";
-        self.client.simple_query(query)?;
+    /// Prepare the target database for handling migrations
+    fn prepare_database(&mut self) -> Result<()> {
+        self.client
+            .simple_query("CREATE SCHEMA IF NOT EXISTS neon_migration")?;
+        self.client.simple_query("CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)")?;
+        self.client.simple_query(
+            "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING",
+        )?;
+        self.client
+            .simple_query("ALTER SCHEMA neon_migration OWNER TO cloud_admin")?;
+        self.client
+            .simple_query("REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC")?;

         Ok(())
     }

+    /// Run the configured set of migrations
     pub fn run_migrations(mut self) -> Result<()> {
-        self.prepare_migrations()?;
+        self.prepare_database()?;

         let mut current_migration = self.get_migration_id()? as usize;
         while current_migration < self.migrations.len() {
@@ -69,6 +97,11 @@ impl<'m> MigrationRunner<'m> {

             if migration.starts_with("-- SKIP") {
                 info!("Skipping migration id={}", migration_id!(current_migration));
+
+                // Even though we are skipping the migration, updating the
+                // migration ID should help keep logic easy to understand when
+                // trying to understand the state of a cluster.
+                self.update_migration_id(migration_id!(current_migration))?;
             } else {
                 info!(
                     "Running migration id={}:\n{}\n",
@@ -87,7 +120,6 @@ impl<'m> MigrationRunner<'m> {
                 )
             })?;

-            // Migration IDs start at 1
             self.update_migration_id(migration_id!(current_migration))?;

             self.client
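A hedged example of driving the compute-migration failpoint from a test. With the fail crate, failpoints can be armed via the FAILPOINTS env var or programmatically; the "return(2)" action makes update_migration_id fail when called for migration id 2. The test body is a sketch, not the repo's actual test.

    #[cfg(test)]
    mod tests {
        #[test]
        fn migration_failpoint_smoke() {
            // Equivalent to running the binary with FAILPOINTS=compute-migration=return(2)
            fail::cfg("compute-migration", "return(2)").unwrap();
            // ... run MigrationRunner against a scratch database here and
            // assert that it errors out at migration 2 ...
            fail::remove("compute-migration");
        }
    }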
@@ -0,0 +1,9 @@
+DO $$
+    DECLARE
+        bypassrls boolean;
+    BEGIN
+        SELECT rolbypassrls INTO bypassrls FROM pg_roles WHERE rolname = 'neon_superuser';
+        IF NOT bypassrls THEN
+            RAISE EXCEPTION 'neon_superuser cannot bypass RLS';
+        END IF;
+    END $$;
compute_tools/src/migrations/tests/0002-alter_roles.sql (new file, 25 lines)
@@ -0,0 +1,25 @@
+DO $$
+    DECLARE
+        role record;
+    BEGIN
+        FOR role IN
+            SELECT rolname AS name, rolinherit AS inherit
+            FROM pg_roles
+            WHERE pg_has_role(rolname, 'neon_superuser', 'member')
+        LOOP
+            IF NOT role.inherit THEN
+                RAISE EXCEPTION '% cannot inherit', quote_ident(role.name);
+            END IF;
+        END LOOP;
+
+        FOR role IN
+            SELECT rolname AS name, rolbypassrls AS bypassrls
+            FROM pg_roles
+            WHERE NOT pg_has_role(rolname, 'neon_superuser', 'member')
+            AND NOT starts_with(rolname, 'pg_')
+        LOOP
+            IF role.bypassrls THEN
+                RAISE EXCEPTION '% can bypass RLS', quote_ident(role.name);
+            END IF;
+        END LOOP;
+    END $$;
@@ -0,0 +1,10 @@
+DO $$
+    BEGIN
+        IF (SELECT current_setting('server_version_num')::numeric < 160000) THEN
+            RETURN;
+        END IF;
+
+        IF NOT (SELECT pg_has_role('neon_superuser', 'pg_create_subscription', 'member')) THEN
+            RAISE EXCEPTION 'neon_superuser cannot execute pg_create_subscription';
+        END IF;
+    END $$;
@@ -0,0 +1,19 @@
+DO $$
+    DECLARE
+        monitor record;
+    BEGIN
+        SELECT pg_has_role('neon_superuser', 'pg_monitor', 'member') AS member,
+               admin_option AS admin
+        INTO monitor
+        FROM pg_auth_members
+        WHERE roleid = 'pg_monitor'::regrole
+        AND member = 'pg_monitor'::regrole;
+
+        IF NOT monitor.member THEN
+            RAISE EXCEPTION 'neon_superuser is not a member of pg_monitor';
+        END IF;
+
+        IF NOT monitor.admin THEN
+            RAISE EXCEPTION 'neon_superuser cannot grant pg_monitor';
+        END IF;
+    END $$;
@@ -0,0 +1,2 @@
+-- This test was never written because at the time migration tests were added
+-- the accompanying migration was already skipped.
@@ -0,0 +1,2 @@
+-- This test was never written because at the time migration tests were added
+-- the accompanying migration was already skipped.
@@ -0,0 +1,2 @@
+-- This test was never written because at the time migration tests were added
+-- the accompanying migration was already skipped.
@@ -0,0 +1,2 @@
+-- This test was never written because at the time migration tests were added
+-- the accompanying migration was already skipped.
@@ -0,0 +1,2 @@
+-- This test was never written because at the time migration tests were added
+-- the accompanying migration was already skipped.
@@ -0,0 +1,13 @@
+DO $$
+    DECLARE
+        can_execute boolean;
+    BEGIN
+        SELECT bool_and(has_function_privilege('neon_superuser', oid, 'execute'))
+        INTO can_execute
+        FROM pg_proc
+        WHERE proname IN ('pg_export_snapshot', 'pg_log_standby_snapshot')
+        AND pronamespace = 'pg_catalog'::regnamespace;
+        IF NOT can_execute THEN
+            RAISE EXCEPTION 'neon_superuser cannot execute both pg_export_snapshot and pg_log_standby_snapshot';
+        END IF;
+    END $$;
@@ -0,0 +1,13 @@
+DO $$
+    DECLARE
+        can_execute boolean;
+    BEGIN
+        SELECT has_function_privilege('neon_superuser', oid, 'execute')
+        INTO can_execute
+        FROM pg_proc
+        WHERE proname = 'pg_show_replication_origin_status'
+        AND pronamespace = 'pg_catalog'::regnamespace;
+        IF NOT can_execute THEN
+            RAISE EXCEPTION 'neon_superuser cannot execute pg_show_replication_origin_status';
+        END IF;
+    END $$;
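An illustrative harness (assumed, not the repo's actual test runner) for the DO-block test files above: each file raises an exception on failure, so simply executing it against a scratch cluster is the assertion. Connection string and include paths are placeholders.

    use postgres::{Client, NoTls};

    fn run_sql_test(client: &mut Client, sql: &str) -> anyhow::Result<()> {
        // simple_query runs the whole DO $$ ... $$ block; a RAISE EXCEPTION
        // inside the block surfaces here as an Err.
        client.simple_query(sql)?;
        Ok(())
    }

    fn main() -> anyhow::Result<()> {
        let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
        run_sql_test(&mut client, include_str!("migrations/tests/0002-alter_roles.sql"))?;
        Ok(())
    }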
@@ -47,6 +47,7 @@ pub enum PerDatabasePhase {
     DeleteDBRoleReferences,
     ChangeSchemaPerms,
     HandleAnonExtension,
+    DropSubscriptionsForDeletedDatabases,
 }

 #[derive(Clone, Debug)]
@@ -326,13 +327,12 @@ async fn get_operations<'a>(

         // Use FORCE to drop database even if there are active connections.
         // We run this from `cloud_admin`, so it should have enough privileges.
+        //
         // NB: there could be other db states, which prevent us from dropping
         // the database. For example, if db is used by any active subscription
         // or replication slot.
-        // TODO: deal with it once we allow logical replication. Proper fix should
-        // involve returning an error code to the control plane, so it could
-        // figure out that this is a non-retryable error, return it to the user
-        // and fail operation permanently.
+        // Such cases are handled in the DropSubscriptionsForDeletedDatabases
+        // phase. We do all the cleanup before actually dropping the database.
         let drop_db_query: String = format!(
             "DROP DATABASE IF EXISTS {} WITH (FORCE)",
             &op.name.pg_quote()
@@ -444,6 +444,30 @@ async fn get_operations<'a>(
         }
         ApplySpecPhase::RunInEachDatabase { db, subphase } => {
             match subphase {
+                PerDatabasePhase::DropSubscriptionsForDeletedDatabases => {
+                    match &db {
+                        DB::UserDB(db) => {
+                            let drop_subscription_query: String = format!(
+                                include_str!("sql/drop_subscription_for_drop_dbs.sql"),
+                                datname_str = escape_literal(&db.name),
+                            );
+
+                            let operations = vec![Operation {
+                                query: drop_subscription_query,
+                                comment: Some(format!(
+                                    "optionally dropping subscriptions for DB {}",
+                                    db.name,
+                                )),
+                            }]
+                            .into_iter();
+
+                            Ok(Box::new(operations))
+                        }
+                        // skip this cleanup for the system databases
+                        // because users can't drop them
+                        DB::SystemDB => Ok(Box::new(empty())),
+                    }
+                }
                 PerDatabasePhase::DeleteDBRoleReferences => {
                     let ctx = ctx.read().await;
compute_tools/src/sql/drop_subscription_for_drop_dbs.sql (new file, 11 lines)
@@ -0,0 +1,11 @@
+DO $$
+    DECLARE
+        subname TEXT;
+    BEGIN
+        FOR subname IN SELECT pg_subscription.subname FROM pg_subscription WHERE subdbid = (SELECT oid FROM pg_database WHERE datname = {datname_str}) LOOP
+            EXECUTE format('ALTER SUBSCRIPTION %I DISABLE;', subname);
+            EXECUTE format('ALTER SUBSCRIPTION %I SET (slot_name = NONE);', subname);
+            EXECUTE format('DROP SUBSCRIPTION %I;', subname);
+        END LOOP;
+    END;
+$$;
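A sketch of the templating trick used above: the SQL file doubles as a Rust format string, with {datname_str} filled in (pre-escaped) at runtime. The escaper below is hypothetical and for illustration only; real code should use a battle-tested quoting routine from the postgres client library.

    fn build_drop_subscription_query(datname: &str) -> String {
        // include_str! expands to a string literal, so it can serve
        // directly as the format! template.
        format!(
            include_str!("sql/drop_subscription_for_drop_dbs.sql"),
            datname_str = escape_literal(datname),
        )
    }

    // Hypothetical minimal escaper, shown only to make the sketch compile.
    fn escape_literal(s: &str) -> String {
        format!("'{}'", s.replace('\'', "''"))
    }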
@@ -274,6 +274,7 @@ fn fill_remote_storage_secrets_vars(mut cmd: &mut Command) -> &mut Command {
     for env_key in [
         "AWS_ACCESS_KEY_ID",
         "AWS_SECRET_ACCESS_KEY",
+        "AWS_SESSION_TOKEN",
         "AWS_PROFILE",
         // HOME is needed in combination with `AWS_PROFILE` to pick up the SSO sessions.
         "HOME",
@@ -19,6 +19,7 @@ use control_plane::storage_controller::{
     NeonStorageControllerStartArgs, NeonStorageControllerStopArgs, StorageController,
 };
 use control_plane::{broker, local_env};
+use nix::fcntl::{flock, FlockArg};
 use pageserver_api::config::{
     DEFAULT_HTTP_LISTEN_PORT as DEFAULT_PAGESERVER_HTTP_PORT,
     DEFAULT_PG_LISTEN_PORT as DEFAULT_PAGESERVER_PG_PORT,
@@ -36,6 +37,8 @@ use safekeeper_api::{
 };
 use std::borrow::Cow;
 use std::collections::{BTreeSet, HashMap};
+use std::fs::File;
+use std::os::fd::AsRawFd;
 use std::path::PathBuf;
 use std::process::exit;
 use std::str::FromStr;
@@ -689,6 +692,21 @@ struct TimelineTreeEl {
     pub children: BTreeSet<TimelineId>,
 }

+/// A flock-based guard over the neon_local repository directory
+struct RepoLock {
+    _file: File,
+}
+
+impl RepoLock {
+    fn new() -> Result<Self> {
+        let repo_dir = File::open(local_env::base_path())?;
+        let repo_dir_fd = repo_dir.as_raw_fd();
+        flock(repo_dir_fd, FlockArg::LockExclusive)?;
+
+        Ok(Self { _file: repo_dir })
+    }
+}
+
 // Main entry point for the 'neon_local' CLI utility
 //
 // This utility helps to manage neon installation. That includes following:
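A standalone sketch of the flock pattern behind RepoLock: the lock lives exactly as long as the guard (the kernel releases it when the File is closed), so holding the guard for the duration of main() serializes concurrent invocations. Names here are illustrative.

    use nix::fcntl::{flock, FlockArg};
    use std::fs::File;
    use std::os::fd::AsRawFd;

    struct DirLock {
        _file: File, // kept only so the descriptor (and lock) stays alive
    }

    fn lock_dir(path: &str) -> anyhow::Result<DirLock> {
        let file = File::open(path)?;
        flock(file.as_raw_fd(), FlockArg::LockExclusive)?; // blocks until exclusive
        Ok(DirLock { _file: file })
    }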
@@ -700,9 +718,14 @@ fn main() -> Result<()> {
     let cli = Cli::parse();

     // Check for 'neon init' command first.
-    let subcommand_result = if let NeonLocalCmd::Init(args) = cli.command {
-        handle_init(&args).map(|env| Some(Cow::Owned(env)))
+    let (subcommand_result, _lock) = if let NeonLocalCmd::Init(args) = cli.command {
+        (handle_init(&args).map(|env| Some(Cow::Owned(env))), None)
     } else {
+        // This tool uses a collection of simple files to store its state, and consequently
+        // it is not generally safe to run multiple commands concurrently. Rather than expect
+        // all callers to know this, use a lock file to protect against concurrent execution.
+        let _repo_lock = RepoLock::new().unwrap();
+
         // all other commands need an existing config
         let env = LocalEnv::load_config(&local_env::base_path()).context("Error loading config")?;
         let original_env = env.clone();
@@ -728,11 +751,12 @@ fn main() -> Result<()> {
             NeonLocalCmd::Mappings(subcmd) => handle_mappings(&subcmd, env),
         };

-        if &original_env != env {
+        let subcommand_result = if &original_env != env {
             subcommand_result.map(|()| Some(Cow::Borrowed(env)))
         } else {
             subcommand_result.map(|()| None)
-        }
+        };
+        (subcommand_result, Some(_repo_lock))
     };

     match subcommand_result {
@@ -922,7 +946,7 @@ fn handle_init(args: &InitCmdArgs) -> anyhow::Result<LocalEnv> {
     } else {
         // User (likely interactive) did not provide a description of the environment, give them the default
         NeonLocalInitConf {
-            control_plane_api: Some(Some(DEFAULT_PAGESERVER_CONTROL_PLANE_API.parse().unwrap())),
+            control_plane_api: Some(DEFAULT_PAGESERVER_CONTROL_PLANE_API.parse().unwrap()),
             broker: NeonBroker {
                 listen_addr: DEFAULT_BROKER_ADDR.parse().unwrap(),
             },
@@ -1718,18 +1742,15 @@ async fn handle_start_all_impl(
         broker::start_broker_process(env, &retry_timeout).await
     });

-    // Only start the storage controller if the pageserver is configured to need it
-    if env.control_plane_api.is_some() {
-        js.spawn(async move {
-            let storage_controller = StorageController::from_env(env);
-            storage_controller
-                .start(NeonStorageControllerStartArgs::with_default_instance_id(
-                    retry_timeout,
-                ))
-                .await
-                .map_err(|e| e.context("start storage_controller"))
-        });
-    }
+    js.spawn(async move {
+        let storage_controller = StorageController::from_env(env);
+        storage_controller
+            .start(NeonStorageControllerStartArgs::with_default_instance_id(
+                retry_timeout,
+            ))
+            .await
+            .map_err(|e| e.context("start storage_controller"))
+    });

     for ps_conf in &env.pageservers {
         js.spawn(async move {
@@ -1774,10 +1795,6 @@ async fn neon_start_status_check(
     const RETRY_INTERVAL: Duration = Duration::from_millis(100);
     const NOTICE_AFTER_RETRIES: Duration = Duration::from_secs(5);

-    if env.control_plane_api.is_none() {
-        return Ok(());
-    }
-
     let storcon = StorageController::from_env(env);

     let retries = retry_timeout.as_millis() / RETRY_INTERVAL.as_millis();
@@ -316,6 +316,10 @@ impl Endpoint {
         // and can cause errors like 'no unpinned buffers available', see
         // <https://github.com/neondatabase/neon/issues/9956>
         conf.append("shared_buffers", "1MB");
+        // Postgres defaults to effective_io_concurrency=1, which does not exercise the pageserver's
+        // batching logic. Set this to 2 so that we exercise the code a bit without letting
+        // individual tests do a lot of concurrent work on underpowered test machines
+        conf.append("effective_io_concurrency", "2");
         conf.append("fsync", "off");
         conf.append("max_connections", "100");
         conf.append("wal_level", "logical");
@@ -581,6 +585,7 @@ impl Endpoint {
             features: self.features.clone(),
             swap_size_bytes: None,
             disk_quota_bytes: None,
+            disable_lfc_resizing: None,
             cluster: Cluster {
                 cluster_id: None, // project ID: not used
                 name: None, // project name: not used
@@ -810,7 +815,7 @@ impl Endpoint {
         }

         let client = reqwest::Client::builder()
-            .timeout(Duration::from_secs(30))
+            .timeout(Duration::from_secs(120))
             .build()
             .unwrap();
         let response = client
@@ -76,7 +76,7 @@ pub struct LocalEnv {
|
|||||||
|
|
||||||
// Control plane upcall API for pageserver: if None, we will not run storage_controller If set, this will
|
// Control plane upcall API for pageserver: if None, we will not run storage_controller If set, this will
|
||||||
// be propagated into each pageserver's configuration.
|
// be propagated into each pageserver's configuration.
|
||||||
pub control_plane_api: Option<Url>,
|
pub control_plane_api: Url,
|
||||||
|
|
||||||
// Control plane upcall API for storage controller. If set, this will be propagated into the
|
// Control plane upcall API for storage controller. If set, this will be propagated into the
|
||||||
// storage controller's configuration.
|
// storage controller's configuration.
|
||||||
@@ -133,7 +133,7 @@ pub struct NeonLocalInitConf {
     pub storage_controller: Option<NeonStorageControllerConf>,
     pub pageservers: Vec<NeonLocalInitPageserverConf>,
     pub safekeepers: Vec<SafekeeperConf>,
-    pub control_plane_api: Option<Option<Url>>,
+    pub control_plane_api: Option<Url>,
     pub control_plane_compute_hook_api: Option<Option<Url>>,
 }

@@ -180,7 +180,7 @@ impl NeonStorageControllerConf {
    const DEFAULT_MAX_WARMING_UP_INTERVAL: std::time::Duration = std::time::Duration::from_secs(30);

    // Very tight heartbeat interval to speed up tests
-    const DEFAULT_HEARTBEAT_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
+    const DEFAULT_HEARTBEAT_INTERVAL: std::time::Duration = std::time::Duration::from_millis(1000);
 }

 impl Default for NeonStorageControllerConf {
@@ -535,7 +535,7 @@ impl LocalEnv {
             storage_controller,
             pageservers,
             safekeepers,
-            control_plane_api,
+            control_plane_api: control_plane_api.unwrap(),
             control_plane_compute_hook_api,
             branch_name_mappings,
         }
@@ -638,7 +638,7 @@ impl LocalEnv {
                 storage_controller: self.storage_controller.clone(),
                 pageservers: vec![], // it's skip_serializing anyway
                 safekeepers: self.safekeepers.clone(),
-                control_plane_api: self.control_plane_api.clone(),
+                control_plane_api: Some(self.control_plane_api.clone()),
                 control_plane_compute_hook_api: self.control_plane_compute_hook_api.clone(),
                 branch_name_mappings: self.branch_name_mappings.clone(),
             },
@@ -768,7 +768,7 @@ impl LocalEnv {
             storage_controller: storage_controller.unwrap_or_default(),
             pageservers: pageservers.iter().map(Into::into).collect(),
             safekeepers,
-            control_plane_api: control_plane_api.unwrap_or_default(),
+            control_plane_api: control_plane_api.unwrap(),
             control_plane_compute_hook_api: control_plane_compute_hook_api.unwrap_or_default(),
             branch_name_mappings: Default::default(),
         };
@@ -95,21 +95,19 @@ impl PageServerNode {

         let mut overrides = vec![pg_distrib_dir_param, broker_endpoint_param];

-        if let Some(control_plane_api) = &self.env.control_plane_api {
-            overrides.push(format!(
-                "control_plane_api='{}'",
-                control_plane_api.as_str()
-            ));
+        overrides.push(format!(
+            "control_plane_api='{}'",
+            self.env.control_plane_api.as_str()
+        ));

         // Storage controller uses the same auth as pageserver: if JWT is enabled
         // for us, we will also need it to talk to them.
         if matches!(conf.http_auth_type, AuthType::NeonJWT) {
             let jwt_token = self
                 .env
                 .generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
                 .unwrap();
             overrides.push(format!("control_plane_api_token='{}'", jwt_token));
-            }
         }

         if !conf.other.contains_key("remote_storage") {
@@ -435,7 +433,7 @@ impl PageServerNode {
     ) -> anyhow::Result<()> {
         let config = Self::parse_config(settings)?;
         self.http_client
-            .tenant_config(&models::TenantConfigRequest { tenant_id, config })
+            .set_tenant_config(&models::TenantConfigRequest { tenant_id, config })
             .await?;

         Ok(())
@@ -338,7 +338,7 @@ impl StorageController {
                 .port(),
             )
         } else {
-            let listen_url = self.env.control_plane_api.clone().unwrap();
+            let listen_url = self.env.control_plane_api.clone();

             let listen = format!(
                 "{}:{}",
@@ -708,7 +708,7 @@ impl StorageController {
        } else {
            // The configured URL has the /upcall path prefix for pageservers to use: we will strip that out
            // for general purpose API access.
-            let listen_url = self.env.control_plane_api.clone().unwrap();
+            let listen_url = self.env.control_plane_api.clone();
            Url::from_str(&format!(
                "http://{}:{}/{path}",
                listen_url.host_str().unwrap(),
@@ -5,12 +5,13 @@ use clap::{Parser, Subcommand};
 use pageserver_api::{
     controller_api::{
         AvailabilityZone, NodeAvailabilityWrapper, NodeDescribeResponse, NodeShardResponse,
-        ShardSchedulingPolicy, TenantCreateRequest, TenantDescribeResponse, TenantPolicyRequest,
+        SafekeeperDescribeResponse, ShardSchedulingPolicy, TenantCreateRequest,
+        TenantDescribeResponse, TenantPolicyRequest,
     },
     models::{
         EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary,
-        ShardParameters, TenantConfig, TenantConfigRequest, TenantShardSplitRequest,
-        TenantShardSplitResponse,
+        ShardParameters, TenantConfig, TenantConfigPatchRequest, TenantConfigRequest,
+        TenantShardSplitRequest, TenantShardSplitResponse,
     },
     shard::{ShardStripeSize, TenantShardId},
 };
@@ -116,9 +117,19 @@ enum Command {
        #[arg(long)]
        tenant_shard_id: TenantShardId,
    },
-    /// Modify the pageserver tenant configuration of a tenant: this is the configuration structure
+    /// Set the pageserver tenant configuration of a tenant: this is the configuration structure
    /// that is passed through to pageservers, and does not affect storage controller behavior.
-    TenantConfig {
+    /// Any previous tenant configs are overwritten.
+    SetTenantConfig {
+        #[arg(long)]
+        tenant_id: TenantId,
+        #[arg(long)]
+        config: String,
+    },
+    /// Patch the pageserver tenant configuration of a tenant. Any fields with null values in the
+    /// provided JSON are unset from the tenant config and all fields with non-null values are set.
+    /// Unspecified fields are not changed.
+    PatchTenantConfig {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
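A quick illustration of the payload semantics the two commands above imply. The field names below are real tenant-config fields, but the values are made up; with clap's default kebab-case naming these variants would be invoked as `set-tenant-config` and `patch-tenant-config`:

```rust
fn main() {
    // Hypothetical --config payloads for the two subcommands above.

    // set-tenant-config: the JSON replaces the whole pageserver tenant config;
    // anything not listed falls back to its default.
    let set_config = r#"{"checkpoint_distance": 42}"#;

    // patch-tenant-config: merged into the existing config; null unsets a field,
    // a value sets it, and absent fields stay as they were.
    let patch_config = r#"{"checkpoint_distance": 42, "gc_horizon": null}"#;

    println!("--config '{set_config}' vs --config '{patch_config}'");
}
```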
@@ -201,6 +212,8 @@ enum Command {
        #[arg(long)]
        timeout: humantime::Duration,
    },
+    /// List safekeepers known to the storage controller
+    Safekeepers {},
 }

 #[derive(Parser)]
@@ -549,11 +562,21 @@ async fn main() -> anyhow::Result<()> {
            )
            .await?;
        }
-        Command::TenantConfig { tenant_id, config } => {
+        Command::SetTenantConfig { tenant_id, config } => {
            let tenant_conf = serde_json::from_str(&config)?;

            vps_client
-                .tenant_config(&TenantConfigRequest {
+                .set_tenant_config(&TenantConfigRequest {
+                    tenant_id,
+                    config: tenant_conf,
+                })
+                .await?;
+        }
+        Command::PatchTenantConfig { tenant_id, config } => {
+            let tenant_conf = serde_json::from_str(&config)?;
+
+            vps_client
+                .patch_tenant_config(&TenantConfigPatchRequest {
                    tenant_id,
                    config: tenant_conf,
                })
@@ -736,7 +759,7 @@ async fn main() -> anyhow::Result<()> {
            threshold,
        } => {
            vps_client
-                .tenant_config(&TenantConfigRequest {
+                .set_tenant_config(&TenantConfigRequest {
                    tenant_id,
                    config: TenantConfig {
                        eviction_policy: Some(EvictionPolicy::LayerAccessThreshold(
@@ -1000,6 +1023,31 @@ async fn main() -> anyhow::Result<()> {
                "Fill was cancelled for node {node_id}. Schedulling policy is now {final_policy:?}"
            );
        }
+        Command::Safekeepers {} => {
+            let mut resp = storcon_client
+                .dispatch::<(), Vec<SafekeeperDescribeResponse>>(
+                    Method::GET,
+                    "control/v1/safekeeper".to_string(),
+                    None,
+                )
+                .await?;
+
+            resp.sort_by(|a, b| a.id.cmp(&b.id));
+
+            let mut table = comfy_table::Table::new();
+            table.set_header(["Id", "Version", "Host", "Port", "Http Port", "AZ Id"]);
+            for sk in resp {
+                table.add_row([
+                    format!("{}", sk.id),
+                    format!("{}", sk.version),
+                    sk.host,
+                    format!("{}", sk.port),
+                    format!("{}", sk.http_port),
+                    sk.availability_zone_id.to_string(),
+                ]);
+            }
+            println!("{table}");
+        }
    }

    Ok(())
@@ -42,6 +42,7 @@ allow = [
    "MPL-2.0",
    "OpenSSL",
    "Unicode-DFS-2016",
+    "Unicode-3.0",
 ]
 confidence-threshold = 0.8
 exceptions = [
@@ -132,11 +132,6 @@
                    "name": "cron.database",
                    "value": "postgres",
                    "vartype": "string"
-                },
-                {
-                    "name": "session_preload_libraries",
-                    "value": "anon",
-                    "vartype": "string"
                }
            ]
        },
@@ -35,11 +35,11 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
    echo "clean up containers if exists"
    cleanup
    PG_TEST_VERSION=$((pg_version < 16 ? 16 : pg_version))
-    # The support of pg_anon not yet added to PG17, so we have to remove the corresponding option
-    if [ $pg_version -eq 17 ]; then
+    # The support of pg_anon not yet added to PG17, so we have to add the corresponding option for other PG versions
+    if [ "${pg_version}" -ne 17 ]; then
        SPEC_PATH="compute_wrapper/var/db/postgres/specs"
        mv $SPEC_PATH/spec.json $SPEC_PATH/spec.bak
-        jq 'del(.cluster.settings[] | select (.name == "session_preload_libraries"))' $SPEC_PATH/spec.bak > $SPEC_PATH/spec.json
+        jq '.cluster.settings += [{"name": "session_preload_libraries","value": "anon","vartype": "string"}]' "${SPEC_PATH}/spec.bak" > "${SPEC_PATH}/spec.json"
    fi
    PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d

@@ -106,8 +106,8 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
        fi
    fi
    cleanup
-    # The support of pg_anon not yet added to PG17, so we have to remove the corresponding option
-    if [ $pg_version -eq 17 ]; then
-        mv $SPEC_PATH/spec.bak $SPEC_PATH/spec.json
+    # Restore the original spec.json
+    if [ "$pg_version" -ne 17 ]; then
+        mv "$SPEC_PATH/spec.bak" "$SPEC_PATH/spec.json"
    fi
 done
@@ -1,6 +1,5 @@
 //! Structs representing the JSON formats used in the compute_ctl's HTTP API.

-use std::collections::HashSet;
 use std::fmt::Display;

 use chrono::{DateTime, Utc};
@@ -163,8 +162,9 @@ pub enum ControlPlaneComputeStatus {
 #[derive(Clone, Debug, Default, Serialize)]
 pub struct InstalledExtension {
     pub extname: String,
-    pub versions: HashSet<String>,
+    pub version: String,
     pub n_databases: u32, // Number of databases using this extension
+    pub owned_by_superuser: String,
 }

 #[derive(Clone, Debug, Default, Serialize)]
@@ -67,6 +67,15 @@ pub struct ComputeSpec {
     #[serde(default)]
     pub disk_quota_bytes: Option<u64>,

+    /// Disables the vm-monitor behavior that resizes LFC on upscale/downscale, instead relying on
+    /// the initial size of LFC.
+    ///
+    /// This is intended for use when the LFC size is being overridden from the default but
+    /// autoscaling is still enabled, and we don't want the vm-monitor to interfere with the custom
+    /// LFC sizing.
+    #[serde(default)]
+    pub disable_lfc_resizing: Option<bool>,
+
     /// Expected cluster state at the end of transition process.
     pub cluster: Cluster,
     pub delta_operations: Option<Vec<DeltaOp>>,
@@ -91,7 +91,7 @@ impl Timing {

     /// Return true if there is a ready event.
     fn is_event_ready(&self, queue: &mut BinaryHeap<Pending>) -> bool {
-        queue.peek().map_or(false, |x| x.time <= self.now())
+        queue.peek().is_some_and(|x| x.time <= self.now())
     }

     /// Clear all pending events.
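The `map_or(false, …)` to `is_some_and` change above is behavior-preserving; a small self-contained check of the equivalence (stand-in values rather than the event queue itself):

```rust
fn main() {
    let some_val: Option<i32> = Some(3);
    let none_val: Option<i32> = None;

    // `is_some_and(f)` is equivalent to `map_or(false, f)`:
    // `None` yields false, `Some(x)` yields f(x).
    assert_eq!(some_val.map_or(false, |x| x <= 5), some_val.is_some_and(|x| x <= 5));
    assert_eq!(none_val.map_or(false, |x| x <= 5), none_val.is_some_and(|x| x <= 5));
    assert!(!none_val.is_some_and(|x| x <= 5));
}
```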
@@ -75,7 +75,7 @@ pub struct TenantPolicyRequest {
     pub scheduling: Option<ShardSchedulingPolicy>,
 }

-#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
+#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
 pub struct AvailabilityZone(pub String);

 impl Display for AvailabilityZone {
@@ -245,6 +245,17 @@ impl From<NodeAvailability> for NodeAvailabilityWrapper {
     }
 }

+/// Scheduling policy enables us to selectively disable some automatic actions that the
+/// controller performs on a tenant shard. This is only set to a non-default value by
+/// human intervention, and it is reset to the default value (Active) when the tenant's
+/// placement policy is modified away from Attached.
+///
+/// The typical use of a non-Active scheduling policy is one of:
+/// - Pinnning a shard to a node (i.e. migrating it there & setting a non-Active scheduling policy)
+/// - Working around a bug (e.g. if something is flapping and we need to stop it until the bug is fixed)
+///
+/// If you're not sure which policy to use to pin a shard to its current location, you probably
+/// want Pause.
 #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
 pub enum ShardSchedulingPolicy {
     // Normal mode: the tenant's scheduled locations may be updated at will, including
@@ -361,6 +372,23 @@ pub struct MetadataHealthListOutdatedResponse {
     pub health_records: Vec<MetadataHealthRecord>,
 }

+/// Publicly exposed safekeeper description
+///
+/// The `active` flag which we have in the DB is not included on purpose: it is deprecated.
+#[derive(Serialize, Deserialize, Clone)]
+pub struct SafekeeperDescribeResponse {
+    pub id: NodeId,
+    pub region_id: String,
+    /// 1 is special, it means just created (not currently posted to storcon).
+    /// Zero or negative is not really expected.
+    /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
+    pub version: i64,
+    pub host: String,
+    pub port: i32,
+    pub http_port: i32,
+    pub availability_zone_id: String,
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
@@ -24,7 +24,7 @@ pub struct Key {

 /// When working with large numbers of Keys in-memory, it is more efficient to handle them as i128 than as
 /// a struct of fields.
-#[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)]
+#[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, Debug)]
 pub struct CompactKey(i128);

 /// The storage key size.
@@ -565,6 +565,10 @@ impl Key {
            && self.field5 == 0
            && self.field6 == u32::MAX
    }
+
+    pub fn is_slru_dir_key(&self) -> bool {
+        slru_dir_kind(self).is_some()
+    }
 }

 #[inline(always)]
@@ -6,6 +6,7 @@ pub mod utilization;
 use camino::Utf8PathBuf;
 pub use utilization::PageserverUtilization;

+use core::ops::Range;
 use std::{
     collections::HashMap,
     fmt::Display,
@@ -17,7 +18,7 @@ use std::{

 use byteorder::{BigEndian, ReadBytesExt};
 use postgres_ffi::BLCKSZ;
-use serde::{Deserialize, Serialize};
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use serde_with::serde_as;
 use utils::{
     completion,
@@ -28,6 +29,7 @@ use utils::{
 };

 use crate::{
+    key::Key,
     reltag::RelTag,
     shard::{ShardCount, ShardStripeSize, TenantShardId},
 };
@@ -210,6 +212,68 @@ pub enum TimelineState {
     Broken { reason: String, backtrace: String },
 }

+#[serde_with::serde_as]
+#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
+pub struct CompactLsnRange {
+    pub start: Lsn,
+    pub end: Lsn,
+}
+
+#[serde_with::serde_as]
+#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
+pub struct CompactKeyRange {
+    #[serde_as(as = "serde_with::DisplayFromStr")]
+    pub start: Key,
+    #[serde_as(as = "serde_with::DisplayFromStr")]
+    pub end: Key,
+}
+
+impl From<Range<Lsn>> for CompactLsnRange {
+    fn from(range: Range<Lsn>) -> Self {
+        Self {
+            start: range.start,
+            end: range.end,
+        }
+    }
+}
+
+impl From<Range<Key>> for CompactKeyRange {
+    fn from(range: Range<Key>) -> Self {
+        Self {
+            start: range.start,
+            end: range.end,
+        }
+    }
+}
+
+impl From<CompactLsnRange> for Range<Lsn> {
+    fn from(range: CompactLsnRange) -> Self {
+        range.start..range.end
+    }
+}
+
+impl From<CompactKeyRange> for Range<Key> {
+    fn from(range: CompactKeyRange) -> Self {
+        range.start..range.end
+    }
+}
+
+impl CompactLsnRange {
+    pub fn above(lsn: Lsn) -> Self {
+        Self {
+            start: lsn,
+            end: Lsn::MAX,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct CompactInfoResponse {
+    pub compact_key_range: Option<CompactKeyRange>,
+    pub compact_lsn_range: Option<CompactLsnRange>,
+    pub sub_compaction: bool,
+}
+
 #[derive(Serialize, Deserialize, Clone)]
 pub struct TimelineCreateRequest {
     pub new_timeline_id: TimelineId,
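A minimal sketch of how these range wrappers behave. A local `Lsn` newtype stands in for `utils::lsn::Lsn` here so the snippet is self-contained; the `From` impl and `above` mirror the ones added above:

```rust
use std::ops::Range;

// Stand-in for utils::lsn::Lsn, only for this sketch.
#[derive(Debug, Clone, Copy, PartialEq)]
struct Lsn(u64);
impl Lsn {
    const MAX: Lsn = Lsn(u64::MAX);
}

#[derive(Debug, Clone, PartialEq)]
struct CompactLsnRange {
    start: Lsn,
    end: Lsn,
}

impl From<Range<Lsn>> for CompactLsnRange {
    fn from(range: Range<Lsn>) -> Self {
        Self { start: range.start, end: range.end }
    }
}

impl CompactLsnRange {
    // Everything at or above `lsn`, i.e. an open-ended lower bound.
    fn above(lsn: Lsn) -> Self {
        Self { start: lsn, end: Lsn::MAX }
    }
}

fn main() {
    let r: CompactLsnRange = (Lsn(10)..Lsn(20)).into();
    assert_eq!(r, CompactLsnRange { start: Lsn(10), end: Lsn(20) });
    assert_eq!(CompactLsnRange::above(Lsn(10)), (Lsn(10)..Lsn::MAX).into());
}
```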
@@ -325,6 +389,115 @@ impl Default for ShardParameters {
     }
 }

+#[derive(Debug, Default, Clone, Eq, PartialEq)]
+pub enum FieldPatch<T> {
+    Upsert(T),
+    Remove,
+    #[default]
+    Noop,
+}
+
+impl<T> FieldPatch<T> {
+    fn is_noop(&self) -> bool {
+        matches!(self, FieldPatch::Noop)
+    }
+
+    pub fn apply(self, target: &mut Option<T>) {
+        match self {
+            Self::Upsert(v) => *target = Some(v),
+            Self::Remove => *target = None,
+            Self::Noop => {}
+        }
+    }
+
+    pub fn map<U, E, F: FnOnce(T) -> Result<U, E>>(self, map: F) -> Result<FieldPatch<U>, E> {
+        match self {
+            Self::Upsert(v) => Ok(FieldPatch::<U>::Upsert(map(v)?)),
+            Self::Remove => Ok(FieldPatch::<U>::Remove),
+            Self::Noop => Ok(FieldPatch::<U>::Noop),
+        }
+    }
+}
+
+impl<'de, T: Deserialize<'de>> Deserialize<'de> for FieldPatch<T> {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Option::deserialize(deserializer).map(|opt| match opt {
+            None => FieldPatch::Remove,
+            Some(val) => FieldPatch::Upsert(val),
+        })
+    }
+}
+
+impl<T: Serialize> Serialize for FieldPatch<T> {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        match self {
+            FieldPatch::Upsert(val) => serializer.serialize_some(val),
+            FieldPatch::Remove => serializer.serialize_none(),
+            FieldPatch::Noop => unreachable!(),
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
+#[serde(default)]
+pub struct TenantConfigPatch {
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub checkpoint_distance: FieldPatch<u64>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub checkpoint_timeout: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub compaction_target_size: FieldPatch<u64>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub compaction_period: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub compaction_threshold: FieldPatch<usize>,
+    // defer parsing compaction_algorithm, like eviction_policy
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub compaction_algorithm: FieldPatch<CompactionAlgorithmSettings>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub gc_horizon: FieldPatch<u64>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub gc_period: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub image_creation_threshold: FieldPatch<usize>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub pitr_interval: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub walreceiver_connect_timeout: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub lagging_wal_timeout: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub max_lsn_wal_lag: FieldPatch<NonZeroU64>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub eviction_policy: FieldPatch<EvictionPolicy>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub min_resident_size_override: FieldPatch<u64>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub evictions_low_residence_duration_metric_threshold: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub heatmap_period: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub lazy_slru_download: FieldPatch<bool>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub timeline_get_throttle: FieldPatch<ThrottleConfig>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub image_layer_creation_check_threshold: FieldPatch<u8>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub lsn_lease_length: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub lsn_lease_length_for_ts: FieldPatch<String>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub timeline_offloading: FieldPatch<bool>,
+    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
+    pub wal_receiver_protocol_override: FieldPatch<PostgresClientProtocol>,
+}
+
 /// An alternative representation of `pageserver::tenant::TenantConf` with
 /// simpler types.
 #[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
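The serde plumbing above gives each field three states from plain JSON: an absent key stays `Noop` (via the container-level `#[serde(default)]`), an explicit `null` becomes `Remove`, and a value becomes `Upsert`. A self-contained sketch of the same pattern on a single field, assuming `serde` (with derive) and `serde_json` as dependencies:

```rust
use serde::{Deserialize, Deserializer};

#[derive(Debug, Default, PartialEq)]
enum FieldPatch<T> {
    Upsert(T),
    Remove,
    #[default]
    Noop,
}

impl<'de, T: Deserialize<'de>> Deserialize<'de> for FieldPatch<T> {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Only reached when the key is present: null => Remove, value => Upsert.
        Option::deserialize(deserializer).map(|opt| match opt {
            None => FieldPatch::Remove,
            Some(val) => FieldPatch::Upsert(val),
        })
    }
}

#[derive(Debug, Deserialize)]
#[serde(default)]
struct Patch {
    gc_horizon: FieldPatch<u64>,
}

impl Default for Patch {
    fn default() -> Self {
        Patch { gc_horizon: FieldPatch::Noop }
    }
}

fn main() {
    // Absent key: serde(default) kicks in, the field stays Noop.
    let p: Patch = serde_json::from_str("{}").unwrap();
    assert_eq!(p.gc_horizon, FieldPatch::Noop);

    // Explicit null: Remove.
    let p: Patch = serde_json::from_str(r#"{"gc_horizon": null}"#).unwrap();
    assert_eq!(p.gc_horizon, FieldPatch::Remove);

    // A value: Upsert.
    let p: Patch = serde_json::from_str(r#"{"gc_horizon": 100}"#).unwrap();
    assert_eq!(p.gc_horizon, FieldPatch::Upsert(100));
}
```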
@@ -356,6 +529,107 @@ pub struct TenantConfig {
     pub wal_receiver_protocol_override: Option<PostgresClientProtocol>,
 }

+impl TenantConfig {
+    pub fn apply_patch(self, patch: TenantConfigPatch) -> TenantConfig {
+        let Self {
+            mut checkpoint_distance,
+            mut checkpoint_timeout,
+            mut compaction_target_size,
+            mut compaction_period,
+            mut compaction_threshold,
+            mut compaction_algorithm,
+            mut gc_horizon,
+            mut gc_period,
+            mut image_creation_threshold,
+            mut pitr_interval,
+            mut walreceiver_connect_timeout,
+            mut lagging_wal_timeout,
+            mut max_lsn_wal_lag,
+            mut eviction_policy,
+            mut min_resident_size_override,
+            mut evictions_low_residence_duration_metric_threshold,
+            mut heatmap_period,
+            mut lazy_slru_download,
+            mut timeline_get_throttle,
+            mut image_layer_creation_check_threshold,
+            mut lsn_lease_length,
+            mut lsn_lease_length_for_ts,
+            mut timeline_offloading,
+            mut wal_receiver_protocol_override,
+        } = self;
+
+        patch.checkpoint_distance.apply(&mut checkpoint_distance);
+        patch.checkpoint_timeout.apply(&mut checkpoint_timeout);
+        patch
+            .compaction_target_size
+            .apply(&mut compaction_target_size);
+        patch.compaction_period.apply(&mut compaction_period);
+        patch.compaction_threshold.apply(&mut compaction_threshold);
+        patch.compaction_algorithm.apply(&mut compaction_algorithm);
+        patch.gc_horizon.apply(&mut gc_horizon);
+        patch.gc_period.apply(&mut gc_period);
+        patch
+            .image_creation_threshold
+            .apply(&mut image_creation_threshold);
+        patch.pitr_interval.apply(&mut pitr_interval);
+        patch
+            .walreceiver_connect_timeout
+            .apply(&mut walreceiver_connect_timeout);
+        patch.lagging_wal_timeout.apply(&mut lagging_wal_timeout);
+        patch.max_lsn_wal_lag.apply(&mut max_lsn_wal_lag);
+        patch.eviction_policy.apply(&mut eviction_policy);
+        patch
+            .min_resident_size_override
+            .apply(&mut min_resident_size_override);
+        patch
+            .evictions_low_residence_duration_metric_threshold
+            .apply(&mut evictions_low_residence_duration_metric_threshold);
+        patch.heatmap_period.apply(&mut heatmap_period);
+        patch.lazy_slru_download.apply(&mut lazy_slru_download);
+        patch
+            .timeline_get_throttle
+            .apply(&mut timeline_get_throttle);
+        patch
+            .image_layer_creation_check_threshold
+            .apply(&mut image_layer_creation_check_threshold);
+        patch.lsn_lease_length.apply(&mut lsn_lease_length);
+        patch
+            .lsn_lease_length_for_ts
+            .apply(&mut lsn_lease_length_for_ts);
+        patch.timeline_offloading.apply(&mut timeline_offloading);
+        patch
+            .wal_receiver_protocol_override
+            .apply(&mut wal_receiver_protocol_override);
+
+        Self {
+            checkpoint_distance,
+            checkpoint_timeout,
+            compaction_target_size,
+            compaction_period,
+            compaction_threshold,
+            compaction_algorithm,
+            gc_horizon,
+            gc_period,
+            image_creation_threshold,
+            pitr_interval,
+            walreceiver_connect_timeout,
+            lagging_wal_timeout,
+            max_lsn_wal_lag,
+            eviction_policy,
+            min_resident_size_override,
+            evictions_low_residence_duration_metric_threshold,
+            heatmap_period,
+            lazy_slru_download,
+            timeline_get_throttle,
+            image_layer_creation_check_threshold,
+            lsn_lease_length,
+            lsn_lease_length_for_ts,
+            timeline_offloading,
+            wal_receiver_protocol_override,
+        }
+    }
+}
+
 /// The policy for the aux file storage.
 ///
 /// It can be switched through `switch_aux_file_policy` tenant config.
@@ -686,6 +960,14 @@ impl TenantConfigRequest {
     }
 }

+#[derive(Serialize, Deserialize, Debug)]
+#[serde(deny_unknown_fields)]
+pub struct TenantConfigPatchRequest {
+    pub tenant_id: TenantId,
+    #[serde(flatten)]
+    pub config: TenantConfigPatch, // as we have a flattened field, we should reject all unknown fields in it
+}
+
 /// See [`TenantState::attachment_status`] and the OpenAPI docs for context.
 #[derive(Serialize, Deserialize, Clone)]
 #[serde(tag = "slug", content = "data", rename_all = "snake_case")]
@@ -1699,4 +1981,45 @@ mod tests {
            );
        }
    }
+
+    #[test]
+    fn test_tenant_config_patch_request_serde() {
+        let patch_request = TenantConfigPatchRequest {
+            tenant_id: TenantId::from_str("17c6d121946a61e5ab0fe5a2fd4d8215").unwrap(),
+            config: TenantConfigPatch {
+                checkpoint_distance: FieldPatch::Upsert(42),
+                gc_horizon: FieldPatch::Remove,
+                compaction_threshold: FieldPatch::Noop,
+                ..TenantConfigPatch::default()
+            },
+        };
+
+        let json = serde_json::to_string(&patch_request).unwrap();
+
+        let expected = r#"{"tenant_id":"17c6d121946a61e5ab0fe5a2fd4d8215","checkpoint_distance":42,"gc_horizon":null}"#;
+        assert_eq!(json, expected);
+
+        let decoded: TenantConfigPatchRequest = serde_json::from_str(&json).unwrap();
+        assert_eq!(decoded.tenant_id, patch_request.tenant_id);
+        assert_eq!(decoded.config, patch_request.config);
+
+        // Now apply the patch to a config to demonstrate semantics
+
+        let base = TenantConfig {
+            checkpoint_distance: Some(28),
+            gc_horizon: Some(100),
+            compaction_target_size: Some(1024),
+            ..Default::default()
+        };
+
+        let expected = TenantConfig {
+            checkpoint_distance: Some(42),
+            gc_horizon: None,
+            ..base.clone()
+        };
+
+        let patched = base.apply_patch(decoded.config);
+
+        assert_eq!(patched, expected);
+    }
 }
@@ -173,7 +173,11 @@ impl ShardIdentity {

     /// Return true if the key should be stored on all shards, not just one.
     pub fn is_key_global(&self, key: &Key) -> bool {
-        if key.is_slru_block_key() || key.is_slru_segment_size_key() || key.is_aux_file_key() {
+        if key.is_slru_block_key()
+            || key.is_slru_segment_size_key()
+            || key.is_aux_file_key()
+            || key.is_slru_dir_key()
+        {
             // Special keys that are only stored on shard 0
             false
         } else if key.is_rel_block_key() {
@@ -9,9 +9,11 @@ regex.workspace = true
 bytes.workspace = true
 anyhow.workspace = true
 crc32c.workspace = true
+criterion.workspace = true
 once_cell.workspace = true
 log.workspace = true
 memoffset.workspace = true
+pprof.workspace = true
 thiserror.workspace = true
 serde.workspace = true
 utils.workspace = true
@@ -24,3 +26,7 @@ postgres.workspace = true
 [build-dependencies]
 anyhow.workspace = true
 bindgen.workspace = true
+
+[[bench]]
+name = "waldecoder"
+harness = false
libs/postgres_ffi/benches/README.md (new file, 26 lines)
@@ -0,0 +1,26 @@
+## Benchmarks
+
+To run benchmarks:
+
+```sh
+# All benchmarks.
+cargo bench --package postgres_ffi
+
+# Specific file.
+cargo bench --package postgres_ffi --bench waldecoder
+
+# Specific benchmark.
+cargo bench --package postgres_ffi --bench waldecoder complete_record/size=1024
+
+# List available benchmarks.
+cargo bench --package postgres_ffi --benches -- --list
+
+# Generate flamegraph profiles using pprof-rs, profiling for 10 seconds.
+# Output in target/criterion/*/profile/flamegraph.svg.
+cargo bench --package postgres_ffi --bench waldecoder complete_record/size=1024 -- --profile-time 10
+```
+
+Additional charts and statistics are available in `target/criterion/report/index.html`.
+
+Benchmarks are automatically compared against the previous run. To compare against other runs, see
+`--baseline` and `--save-baseline`.
libs/postgres_ffi/benches/waldecoder.rs (new file, 49 lines)
@@ -0,0 +1,49 @@
+use std::ffi::CStr;
+
+use criterion::{criterion_group, criterion_main, Bencher, Criterion};
+use postgres_ffi::v17::wal_generator::LogicalMessageGenerator;
+use postgres_ffi::v17::waldecoder_handler::WalStreamDecoderHandler;
+use postgres_ffi::waldecoder::WalStreamDecoder;
+use pprof::criterion::{Output, PProfProfiler};
+use utils::lsn::Lsn;
+
+const KB: usize = 1024;
+
+// Register benchmarks with Criterion.
+criterion_group!(
+    name = benches;
+    config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
+    targets = bench_complete_record,
+);
+criterion_main!(benches);
+
+/// Benchmarks WalStreamDecoder::complete_record() for a logical message of varying size.
+fn bench_complete_record(c: &mut Criterion) {
+    let mut g = c.benchmark_group("complete_record");
+    for size in [64, KB, 8 * KB, 128 * KB] {
+        // Kind of weird to change the group throughput per benchmark, but it's the only way
+        // to vary it per benchmark. It works.
+        g.throughput(criterion::Throughput::Bytes(size as u64));
+        g.bench_function(format!("size={size}"), |b| run_bench(b, size).unwrap());
+    }
+
+    fn run_bench(b: &mut Bencher, size: usize) -> anyhow::Result<()> {
+        const PREFIX: &CStr = c"";
+        let value_size = LogicalMessageGenerator::make_value_size(size, PREFIX);
+        let value = vec![1; value_size];
+
+        let mut decoder = WalStreamDecoder::new(Lsn(0), 170000);
+        let msg = LogicalMessageGenerator::new(PREFIX, &value)
+            .next()
+            .unwrap()
+            .encode(Lsn(0));
+        assert_eq!(msg.len(), size);
+
+        b.iter(|| {
+            let msg = msg.clone(); // Bytes::clone() is cheap
+            decoder.complete_record(msg).unwrap();
+        });
+
+        Ok(())
+    }
+}
@@ -106,11 +106,11 @@ impl<R: RecordGenerator> WalGenerator<R> {
     const TIMELINE_ID: u32 = 1;

     /// Creates a new WAL generator with the given record generator.
-    pub fn new(record_generator: R) -> WalGenerator<R> {
+    pub fn new(record_generator: R, start_lsn: Lsn) -> WalGenerator<R> {
         Self {
             record_generator,
-            lsn: Lsn(0),
-            prev_lsn: Lsn(0),
+            lsn: start_lsn,
+            prev_lsn: start_lsn,
         }
     }

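Illustrative use of the new `start_lsn` parameter. The module paths are assumed from the bench imports earlier in this diff, and the LSN value is arbitrary:

```rust
// Paths assumed from the bench file shown above; not a definitive API reference.
use postgres_ffi::v17::wal_generator::{LogicalMessageGenerator, WalGenerator};
use utils::lsn::Lsn;

fn example() -> WalGenerator<LogicalMessageGenerator> {
    // Start emitting records at a non-zero position rather than Lsn(0),
    // e.g. to append to WAL that already exists.
    let start_lsn = Lsn(0x0100_0000);
    WalGenerator::new(LogicalMessageGenerator::new(c"prefix", b"payload"), start_lsn)
}
```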
@@ -231,6 +231,22 @@ impl LogicalMessageGenerator {
         };
         [&header.encode(), prefix, message].concat().into()
     }
+
+    /// Computes how large a value must be to get a record of the given size. Convenience method to
+    /// construct records of pre-determined size. Panics if the record size is too small.
+    pub fn make_value_size(record_size: usize, prefix: &CStr) -> usize {
+        let xlog_header_size = XLOG_SIZE_OF_XLOG_RECORD;
+        let lm_header_size = size_of::<XlLogicalMessage>();
+        let prefix_size = prefix.to_bytes_with_nul().len();
+        let data_header_size = match record_size - xlog_header_size - 2 {
+            0..=255 => 2,
+            256..=258 => panic!("impossible record_size {record_size}"),
+            259.. => 5,
+        };
+        record_size
+            .checked_sub(xlog_header_size + lm_header_size + prefix_size + data_header_size)
+            .expect("record_size too small")
+    }
 }

 impl Iterator for LogicalMessageGenerator {
@@ -81,7 +81,7 @@ fn test_end_of_wal<C: crate::Crafter>(test_name: &str) {
             continue;
         }
         let mut f = File::options().write(true).open(file.path()).unwrap();
-        const ZEROS: [u8; WAL_SEGMENT_SIZE] = [0u8; WAL_SEGMENT_SIZE];
+        static ZEROS: [u8; WAL_SEGMENT_SIZE] = [0u8; WAL_SEGMENT_SIZE];
         f.write_all(
             &ZEROS[0..min(
                 WAL_SEGMENT_SIZE,
@@ -1,7 +1,7 @@
 [package]
 name = "postgres-protocol2"
 version = "0.1.0"
-edition = "2018"
+edition = "2021"
 license = "MIT/Apache-2.0"

 [dependencies]
@@ -9,8 +9,7 @@
 //!
 //! This library assumes that the `client_encoding` backend parameter has been
 //! set to `UTF8`. It will most likely not behave properly if that is not the case.
-#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.6")]
-#![warn(missing_docs, rust_2018_idioms, clippy::all)]
+#![warn(missing_docs, clippy::all)]

 use byteorder::{BigEndian, ByteOrder};
 use bytes::{BufMut, BytesMut};
@@ -3,7 +3,6 @@

 use byteorder::{BigEndian, ByteOrder};
 use bytes::{Buf, BufMut, BytesMut};
-use std::convert::TryFrom;
 use std::error::Error;
 use std::io;
 use std::marker;
@@ -1,7 +1,7 @@
 [package]
 name = "postgres-types2"
 version = "0.1.0"
-edition = "2018"
+edition = "2021"
 license = "MIT/Apache-2.0"

 [dependencies]
@@ -2,8 +2,7 @@
 //!
 //! This crate is used by the `tokio-postgres` and `postgres` crates. You normally don't need to depend directly on it
 //! unless you want to define your own `ToSql` or `FromSql` definitions.
-#![doc(html_root_url = "https://docs.rs/postgres-types/0.2")]
-#![warn(clippy::all, rust_2018_idioms, missing_docs)]
+#![warn(clippy::all, missing_docs)]

 use fallible_iterator::FallibleIterator;
 use postgres_protocol2::types;
@@ -1,7 +1,7 @@
 [package]
 name = "tokio-postgres2"
 version = "0.1.0"
-edition = "2018"
+edition = "2021"
 license = "MIT/Apache-2.0"

 [dependencies]
@@ -4,18 +4,23 @@ use crate::config::Host;
 use crate::config::SslMode;
 use crate::connection::{Request, RequestMessages};

-use crate::types::{Oid, Type};
+use crate::query::RowStream;
+use crate::simple_query::SimpleQueryStream;
+
+use crate::types::{Oid, ToSql, Type};

 use crate::{
-    simple_query, CancelToken, Error, ReadyForQueryStatus, Statement, Transaction,
-    TransactionBuilder,
+    prepare, query, simple_query, slice_iter, CancelToken, Error, ReadyForQueryStatus, Row,
+    SimpleQueryMessage, Statement, ToStatement, Transaction, TransactionBuilder,
 };
 use bytes::BytesMut;
 use fallible_iterator::FallibleIterator;
-use futures_util::{future, ready};
+use futures_util::{future, ready, TryStreamExt};
+use parking_lot::Mutex;
 use postgres_protocol2::message::{backend::Message, frontend};
 use std::collections::HashMap;
 use std::fmt;
+use std::sync::Arc;
 use std::task::{Context, Poll};
 use tokio::sync::mpsc;

@@ -50,7 +55,7 @@ impl Responses {
 /// A cache of type info and prepared statements for fetching type info
 /// (corresponding to the queries in the [prepare] module).
 #[derive(Default)]
-pub(crate) struct CachedTypeInfo {
+struct CachedTypeInfo {
     /// A statement for basic information for a type from its
     /// OID. Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_QUERY) (or its
     /// fallback).
@@ -66,45 +71,13 @@ pub(crate) struct CachedTypeInfo {
     /// Cache of types already looked up.
     types: HashMap<Oid, Type>,
 }
-impl CachedTypeInfo {
-    pub(crate) fn typeinfo(&mut self) -> Option<&Statement> {
-        self.typeinfo.as_ref()
-    }
-
-    pub(crate) fn set_typeinfo(&mut self, statement: Statement) -> &Statement {
-        self.typeinfo.insert(statement)
-    }
-
-    pub(crate) fn typeinfo_composite(&mut self) -> Option<&Statement> {
-        self.typeinfo_composite.as_ref()
-    }
-
-    pub(crate) fn set_typeinfo_composite(&mut self, statement: Statement) -> &Statement {
-        self.typeinfo_composite.insert(statement)
-    }
-
-    pub(crate) fn typeinfo_enum(&mut self) -> Option<&Statement> {
-        self.typeinfo_enum.as_ref()
-    }
-
-    pub(crate) fn set_typeinfo_enum(&mut self, statement: Statement) -> &Statement {
-        self.typeinfo_enum.insert(statement)
-    }
-
-    pub(crate) fn type_(&mut self, oid: Oid) -> Option<Type> {
-        self.types.get(&oid).cloned()
-    }
-
-    pub(crate) fn set_type(&mut self, oid: Oid, type_: &Type) {
-        self.types.insert(oid, type_.clone());
-    }
-}

 pub struct InnerClient {
     sender: mpsc::UnboundedSender<Request>,
+    cached_typeinfo: Mutex<CachedTypeInfo>,

     /// A buffer to use when writing out postgres commands.
-    buffer: BytesMut,
+    buffer: Mutex<BytesMut>,
 }

 impl InnerClient {
@@ -119,14 +92,47 @@ impl InnerClient {
         })
     }

+    pub fn typeinfo(&self) -> Option<Statement> {
+        self.cached_typeinfo.lock().typeinfo.clone()
+    }
+
+    pub fn set_typeinfo(&self, statement: &Statement) {
+        self.cached_typeinfo.lock().typeinfo = Some(statement.clone());
+    }
+
+    pub fn typeinfo_composite(&self) -> Option<Statement> {
+        self.cached_typeinfo.lock().typeinfo_composite.clone()
+    }
+
+    pub fn set_typeinfo_composite(&self, statement: &Statement) {
+        self.cached_typeinfo.lock().typeinfo_composite = Some(statement.clone());
+    }
+
+    pub fn typeinfo_enum(&self) -> Option<Statement> {
+        self.cached_typeinfo.lock().typeinfo_enum.clone()
+    }
+
+    pub fn set_typeinfo_enum(&self, statement: &Statement) {
+        self.cached_typeinfo.lock().typeinfo_enum = Some(statement.clone());
+    }
+
+    pub fn type_(&self, oid: Oid) -> Option<Type> {
+        self.cached_typeinfo.lock().types.get(&oid).cloned()
+    }
+
+    pub fn set_type(&self, oid: Oid, type_: &Type) {
+        self.cached_typeinfo.lock().types.insert(oid, type_.clone());
+    }
+
     /// Call the given function with a buffer to be used when writing out
     /// postgres commands.
-    pub fn with_buf<F, R>(&mut self, f: F) -> R
+    pub fn with_buf<F, R>(&self, f: F) -> R
     where
         F: FnOnce(&mut BytesMut) -> R,
     {
-        let r = f(&mut self.buffer);
-        self.buffer.clear();
+        let mut buffer = self.buffer.lock();
+        let r = f(&mut buffer);
+        buffer.clear();
         r
     }
 }
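The shape of this change — `&mut self` methods becoming `&self` by parking the mutable state in a `parking_lot::Mutex` so the client can be shared behind an `Arc` — in a self-contained sketch of the `with_buf` buffer-reuse idea (assumes the `bytes` and `parking_lot` crates; not the real client):

```rust
use bytes::BytesMut;
use parking_lot::Mutex;
use std::sync::Arc;

struct Inner {
    // Shared, reusable scratch buffer. The Mutex provides interior mutability,
    // which is what allows &self methods on a value shared via Arc.
    buffer: Mutex<BytesMut>,
}

impl Inner {
    fn with_buf<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut BytesMut) -> R,
    {
        let mut buffer = self.buffer.lock();
        let r = f(&mut buffer);
        buffer.clear(); // leave the buffer empty (but with its capacity) for the next caller
        r
    }
}

fn main() {
    let inner = Arc::new(Inner { buffer: Mutex::new(BytesMut::new()) });
    let frame = inner.with_buf(|buf| {
        buf.extend_from_slice(b"hello");
        buf.split().freeze() // take the bytes out before the clear
    });
    assert_eq!(&frame[..], b"hello");
}
```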
@@ -144,8 +150,7 @@ pub struct SocketConfig {
 /// The client is one half of what is returned when a connection is established. Users interact with the database
 /// through this client object.
 pub struct Client {
-    pub(crate) inner: InnerClient,
-    pub(crate) cached_typeinfo: CachedTypeInfo,
+    inner: Arc<InnerClient>,

     socket_config: SocketConfig,
     ssl_mode: SslMode,
@@ -162,11 +167,11 @@ impl Client {
         secret_key: i32,
     ) -> Client {
         Client {
-            inner: InnerClient {
+            inner: Arc::new(InnerClient {
                 sender,
+                cached_typeinfo: Default::default(),
                 buffer: Default::default(),
-            },
-            cached_typeinfo: Default::default(),
+            }),

             socket_config,
             ssl_mode,
@@ -180,6 +185,161 @@ impl Client {
         self.process_id
     }

+    pub(crate) fn inner(&self) -> &Arc<InnerClient> {
+        &self.inner
+    }
+
+    /// Creates a new prepared statement.
+    ///
+    /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc),
+    /// which are set when executed. Prepared statements can only be used with the connection that created them.
+    pub async fn prepare(&self, query: &str) -> Result<Statement, Error> {
+        self.prepare_typed(query, &[]).await
+    }
+
+    /// Like `prepare`, but allows the types of query parameters to be explicitly specified.
+    ///
+    /// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be
+    /// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`.
+    pub async fn prepare_typed(
+        &self,
+        query: &str,
+        parameter_types: &[Type],
+    ) -> Result<Statement, Error> {
+        prepare::prepare(&self.inner, query, parameter_types).await
+    }
+
+    /// Executes a statement, returning a vector of the resulting rows.
+    ///
+    /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
+    /// provided, 1-indexed.
+    ///
+    /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
+    /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
+    /// with the `prepare` method.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the number of parameters provided does not match the number expected.
+    pub async fn query<T>(
+        &self,
+        statement: &T,
+        params: &[&(dyn ToSql + Sync)],
+    ) -> Result<Vec<Row>, Error>
+    where
+        T: ?Sized + ToStatement,
+    {
+        self.query_raw(statement, slice_iter(params))
+            .await?
+            .try_collect()
+            .await
+    }
+
+    /// The maximally flexible version of [`query`].
+    ///
+    /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
+    /// provided, 1-indexed.
+    ///
+    /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
+    /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
+    /// with the `prepare` method.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the number of parameters provided does not match the number expected.
+    ///
+    /// [`query`]: #method.query
+    pub async fn query_raw<'a, T, I>(&self, statement: &T, params: I) -> Result<RowStream, Error>
+    where
+        T: ?Sized + ToStatement,
+        I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
+        I::IntoIter: ExactSizeIterator,
+    {
+        let statement = statement.__convert().into_statement(self).await?;
+        query::query(&self.inner, statement, params).await
+    }
+
+    /// Pass text directly to the Postgres backend to allow it to sort out typing itself and
+    /// to save a roundtrip
+    pub async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
+    where
+        S: AsRef<str>,
+        I: IntoIterator<Item = Option<S>>,
+        I::IntoIter: ExactSizeIterator,
+    {
+        query::query_txt(&self.inner, statement, params).await
+    }
+
+    /// Executes a statement, returning the number of rows modified.
+    ///
+    /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
+    /// provided, 1-indexed.
+    ///
+    /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
+    /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
+    /// with the `prepare` method.
+    ///
+    /// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the number of parameters provided does not match the number expected.
+    pub async fn execute<T>(
+        &self,
+        statement: &T,
+        params: &[&(dyn ToSql + Sync)],
+    ) -> Result<u64, Error>
+    where
+        T: ?Sized + ToStatement,
+    {
+        self.execute_raw(statement, slice_iter(params)).await
+    }
+
+    /// The maximally flexible version of [`execute`].
+    ///
+    /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
+    /// provided, 1-indexed.
+    ///
+    /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
+    /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
+    /// with the `prepare` method.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the number of parameters provided does not match the number expected.
+    ///
+    /// [`execute`]: #method.execute
+    pub async fn execute_raw<'a, T, I>(&self, statement: &T, params: I) -> Result<u64, Error>
+    where
+        T: ?Sized + ToStatement,
+        I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
+        I::IntoIter: ExactSizeIterator,
+    {
+        let statement = statement.__convert().into_statement(self).await?;
+        query::execute(self.inner(), statement, params).await
+    }
+
+    /// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows.
+    ///
+    /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
+    /// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings,
+    /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a list of the
+    /// rows, this method returns a list of an enum which indicates either the completion of one of the commands,
+    /// or a row of data. This preserves the framing between the separate statements in the request.
+    ///
+    /// # Warning
+    ///
+    /// Prepared statements should be use for any query which contains user-specified data, as they provided the
+    /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
+    /// them to this method!
+    pub async fn simple_query(&self, query: &str) -> Result<Vec<SimpleQueryMessage>, Error> {
+        self.simple_query_raw(query).await?.try_collect().await
+    }
+
+    pub(crate) async fn simple_query_raw(&self, query: &str) -> Result<SimpleQueryStream, Error> {
+        simple_query::simple_query(self.inner(), query).await
+    }
+
     /// Executes a sequence of SQL statements using the simple query protocol.
     ///
     /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
@@ -190,8 +350,8 @@ impl Client {
     /// Prepared statements should be use for any query which contains user-specified data, as they provided the
     /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
     /// them to this method!
-    pub async fn batch_execute(&mut self, query: &str) -> Result<ReadyForQueryStatus, Error> {
-        simple_query::batch_execute(&mut self.inner, query).await
+    pub async fn batch_execute(&self, query: &str) -> Result<ReadyForQueryStatus, Error> {
+        simple_query::batch_execute(self.inner(), query).await
     }

     /// Begins a new database transaction.
@@ -199,7 +359,7 @@ impl Client {
     /// The transaction will roll back by default - use the `commit` method to commit it.
     pub async fn transaction(&mut self) -> Result<Transaction<'_>, Error> {
         struct RollbackIfNotDone<'me> {
-            client: &'me mut Client,
+            client: &'me Client,
             done: bool,
         }

@@ -209,13 +369,13 @@ impl Client {
                     return;
                 }

-                let buf = self.client.inner.with_buf(|buf| {
+                let buf = self.client.inner().with_buf(|buf| {
                     frontend::query("ROLLBACK", buf).unwrap();
                     buf.split().freeze()
                 });
                 let _ = self
                     .client
-                    .inner
+                    .inner()
                     .send(RequestMessages::Single(FrontendMessage::Raw(buf)));
             }
         }
@@ -230,7 +390,7 @@ impl Client {
                 client: self,
                 done: false,
             };
-            cleaner.client.batch_execute("BEGIN").await?;
+            self.batch_execute("BEGIN").await?;
             cleaner.done = true;
         }

@@ -256,6 +416,11 @@ impl Client {
         }
     }

+    /// Query for type information
+    pub async fn get_type(&self, oid: Oid) -> Result<Type, Error> {
+        crate::prepare::get_type(&self.inner, oid).await
+    }
+
     /// Determines if the connection to the server has already closed.
     ///
     /// In that case, all future queries will fail.
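With every query path now taking &self, typical call sites look like this sketch (the demo function is illustrative and assumes an already-connected Client from this crate):

    async fn demo(client: &Client) -> Result<(), Error> {
        // Prepare once, run many times; `query` also accepts a raw SQL string.
        let stmt = client.prepare("SELECT $1::TEXT").await?;
        let rows = client.query(&stmt, &[&"hello"]).await?;
        let value: &str = rows[0].get(0);
        assert_eq!(value, "hello");

        // Text-only path: parameters travel as strings and the backend infers
        // their types, saving the prepare roundtrip.
        let _stream = client
            .query_raw_txt("SELECT 1", std::iter::empty::<Option<&str>>())
            .await?;
        Ok(())
    }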
@@ -33,10 +33,14 @@ pub struct Response {
 #[derive(PartialEq, Debug)]
 enum State {
     Active,
-    Terminating,
     Closing,
 }

+enum WriteReady {
+    Terminating,
+    WaitingOnRead,
+}
+
 /// A connection to a PostgreSQL database.
 ///
 /// This is one half of what is returned when a new connection is established. It performs the actual IO with the
@@ -51,7 +55,6 @@ pub struct Connection<S, T> {
     /// HACK: we need this in the Neon Proxy to forward params.
     pub parameters: HashMap<String, String>,
     receiver: mpsc::UnboundedReceiver<Request>,
-    pending_request: Option<RequestMessages>,
     pending_responses: VecDeque<BackendMessage>,
     responses: VecDeque<Response>,
     state: State,
@@ -72,7 +75,6 @@ where
             stream,
             parameters,
             receiver,
-            pending_request: None,
             pending_responses,
             responses: VecDeque::new(),
             state: State::Active,
@@ -93,26 +95,23 @@ where
             .map(|o| o.map(|r| r.map_err(Error::io)))
     }

-    fn poll_read(&mut self, cx: &mut Context<'_>) -> Result<Option<AsyncMessage>, Error> {
-        if self.state != State::Active {
-            trace!("poll_read: done");
-            return Ok(None);
-        }
-
+    /// Read and process messages from the connection to postgres.
+    /// client <- postgres
+    fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll<Result<AsyncMessage, Error>> {
         loop {
             let message = match self.poll_response(cx)? {
                 Poll::Ready(Some(message)) => message,
-                Poll::Ready(None) => return Err(Error::closed()),
+                Poll::Ready(None) => return Poll::Ready(Err(Error::closed())),
                 Poll::Pending => {
                     trace!("poll_read: waiting on response");
-                    return Ok(None);
+                    return Poll::Pending;
                 }
             };

             let (mut messages, request_complete) = match message {
                 BackendMessage::Async(Message::NoticeResponse(body)) => {
                     let error = DbError::parse(&mut body.fields()).map_err(Error::parse)?;
-                    return Ok(Some(AsyncMessage::Notice(error)));
+                    return Poll::Ready(Ok(AsyncMessage::Notice(error)));
                 }
                 BackendMessage::Async(Message::NotificationResponse(body)) => {
                     let notification = Notification {
@@ -120,7 +119,7 @@ where
                         channel: body.channel().map_err(Error::parse)?.to_string(),
                         payload: body.message().map_err(Error::parse)?.to_string(),
                     };
-                    return Ok(Some(AsyncMessage::Notification(notification)));
+                    return Poll::Ready(Ok(AsyncMessage::Notification(notification)));
                 }
                 BackendMessage::Async(Message::ParameterStatus(body)) => {
                     self.parameters.insert(
@@ -139,8 +138,10 @@ where
             let mut response = match self.responses.pop_front() {
                 Some(response) => response,
                 None => match messages.next().map_err(Error::parse)? {
-                    Some(Message::ErrorResponse(error)) => return Err(Error::db(error)),
-                    _ => return Err(Error::unexpected_message()),
+                    Some(Message::ErrorResponse(error)) => {
+                        return Poll::Ready(Err(Error::db(error)))
+                    }
+                    _ => return Poll::Ready(Err(Error::unexpected_message())),
                 },
             };

@@ -164,18 +165,14 @@ where
                         request_complete,
                     });
                     trace!("poll_read: waiting on sender");
-                    return Ok(None);
+                    return Poll::Pending;
                 }
             }
         }
     }

+    /// Fetch the next client request and enqueue the response sender.
     fn poll_request(&mut self, cx: &mut Context<'_>) -> Poll<Option<RequestMessages>> {
-        if let Some(messages) = self.pending_request.take() {
-            trace!("retrying pending request");
-            return Poll::Ready(Some(messages));
-        }
-
         if self.receiver.is_closed() {
             return Poll::Ready(None);
         }
@@ -193,74 +190,80 @@ where
         }
     }

-    fn poll_write(&mut self, cx: &mut Context<'_>) -> Result<bool, Error> {
+    /// Process client requests and write them to the postgres connection, flushing if necessary.
+    /// client -> postgres
+    fn poll_write(&mut self, cx: &mut Context<'_>) -> Poll<Result<WriteReady, Error>> {
         loop {
-            if self.state == State::Closing {
-                trace!("poll_write: done");
-                return Ok(false);
-            }
-
             if Pin::new(&mut self.stream)
                 .poll_ready(cx)
                 .map_err(Error::io)?
                 .is_pending()
             {
                 trace!("poll_write: waiting on socket");
-                return Ok(false);
+
+                // poll_ready is self-flushing.
+                return Poll::Pending;
             }

-            let request = match self.poll_request(cx) {
-                Poll::Ready(Some(request)) => request,
-                Poll::Ready(None) if self.responses.is_empty() && self.state == State::Active => {
+            match self.poll_request(cx) {
+                // send the message to postgres
+                Poll::Ready(Some(RequestMessages::Single(request))) => {
+                    Pin::new(&mut self.stream)
+                        .start_send(request)
+                        .map_err(Error::io)?;
+                }
+                // No more messages from the client, and no more responses to wait for.
+                // Send a terminate message to postgres
+                Poll::Ready(None) if self.responses.is_empty() => {
                     trace!("poll_write: at eof, terminating");
-                    self.state = State::Terminating;
                     let mut request = BytesMut::new();
                     frontend::terminate(&mut request);
-                    RequestMessages::Single(FrontendMessage::Raw(request.freeze()))
+                    let request = FrontendMessage::Raw(request.freeze());
+
+                    Pin::new(&mut self.stream)
+                        .start_send(request)
+                        .map_err(Error::io)?;
+
+                    trace!("poll_write: sent eof, closing");
+                    trace!("poll_write: done");
+                    return Poll::Ready(Ok(WriteReady::Terminating));
                 }
+                // No more messages from the client, but there are still some responses to wait for.
                 Poll::Ready(None) => {
                     trace!(
                         "poll_write: at eof, pending responses {}",
                         self.responses.len()
                     );
-                    return Ok(true);
+                    ready!(self.poll_flush(cx))?;
+                    return Poll::Ready(Ok(WriteReady::WaitingOnRead));
                 }
+                // Still waiting for a message from the client.
                 Poll::Pending => {
                     trace!("poll_write: waiting on request");
-                    return Ok(true);
-                }
-            };
-
-            match request {
-                RequestMessages::Single(request) => {
-                    Pin::new(&mut self.stream)
-                        .start_send(request)
-                        .map_err(Error::io)?;
-                    if self.state == State::Terminating {
-                        trace!("poll_write: sent eof, closing");
-                        self.state = State::Closing;
-                    }
+                    ready!(self.poll_flush(cx))?;
+                    return Poll::Pending;
                 }
             }
         }
     }

-    fn poll_flush(&mut self, cx: &mut Context<'_>) -> Result<(), Error> {
+    fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
         match Pin::new(&mut self.stream)
             .poll_flush(cx)
             .map_err(Error::io)?
         {
-            Poll::Ready(()) => trace!("poll_flush: flushed"),
-            Poll::Pending => trace!("poll_flush: waiting on socket"),
+            Poll::Ready(()) => {
+                trace!("poll_flush: flushed");
+                Poll::Ready(Ok(()))
+            }
+            Poll::Pending => {
+                trace!("poll_flush: waiting on socket");
+                Poll::Pending
+            }
         }
-        Ok(())
     }

     fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
-        if self.state != State::Closing {
-            return Poll::Pending;
-        }
-
         match Pin::new(&mut self.stream)
             .poll_close(cx)
             .map_err(Error::io)?
@@ -289,18 +292,30 @@ where
         &mut self,
         cx: &mut Context<'_>,
     ) -> Poll<Option<Result<AsyncMessage, Error>>> {
-        let message = self.poll_read(cx)?;
-        let want_flush = self.poll_write(cx)?;
-        if want_flush {
-            self.poll_flush(cx)?;
+        if self.state != State::Closing {
+            // if the state is still active, try read from and write to postgres.
+            let message = self.poll_read(cx)?;
+            let closing = self.poll_write(cx)?;
+            if let Poll::Ready(WriteReady::Terminating) = closing {
+                self.state = State::Closing;
+            }
+
+            if let Poll::Ready(message) = message {
+                return Poll::Ready(Some(Ok(message)));
+            }
+
+            // poll_read returned Pending.
+            // poll_write returned Pending or Ready(WriteReady::WaitingOnRead).
+            // if poll_write returned Ready(WriteReady::WaitingOnRead), then we are waiting to read more data from postgres.
+            if self.state != State::Closing {
+                return Poll::Pending;
+            }
         }
-        match message {
-            Some(message) => Poll::Ready(Some(Ok(message))),
-            None => match self.poll_shutdown(cx) {
-                Poll::Ready(Ok(())) => Poll::Ready(None),
-                Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))),
-                Poll::Pending => Poll::Pending,
-            },
+        match self.poll_shutdown(cx) {
+            Poll::Ready(Ok(())) => Poll::Ready(None),
+            Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))),
+            Poll::Pending => Poll::Pending,
         }
     }
 }
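The Connection half is a future that must be polled for any of the above to run. A sketch of the usual tokio-postgres driving pattern (the connect call and Config type stand in for however this crate establishes the pair; they are not part of the diff):

    async fn start(config: Config) -> Result<Client, Error> {
        let (client, connection) = connect(config).await?;
        tokio::spawn(async move {
            // This future runs poll_read/poll_write in a loop and resolves
            // once poll_shutdown has closed the stream in the Closing state.
            if let Err(e) = connection.await {
                eprintln!("connection error: {e}");
            }
        });
        Ok(client)
    }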
@@ -1,4 +1,4 @@
-use crate::query::{self, RowStream};
+use crate::query::RowStream;
 use crate::types::Type;
 use crate::{Client, Error, Transaction};
 use async_trait::async_trait;
@@ -13,32 +13,33 @@ mod private {
 /// This trait is "sealed", and cannot be implemented outside of this crate.
 #[async_trait]
 pub trait GenericClient: private::Sealed {
-    async fn query_raw_txt<S, I>(&mut self, statement: &str, params: I) -> Result<RowStream, Error>
+    /// Like `Client::query_raw_txt`.
+    async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
     where
         S: AsRef<str> + Sync + Send,
         I: IntoIterator<Item = Option<S>> + Sync + Send,
         I::IntoIter: ExactSizeIterator + Sync + Send;

     /// Query for type information
-    async fn get_type(&mut self, oid: Oid) -> Result<Type, Error>;
+    async fn get_type(&self, oid: Oid) -> Result<Type, Error>;
 }

 impl private::Sealed for Client {}

 #[async_trait]
 impl GenericClient for Client {
-    async fn query_raw_txt<S, I>(&mut self, statement: &str, params: I) -> Result<RowStream, Error>
+    async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
     where
         S: AsRef<str> + Sync + Send,
         I: IntoIterator<Item = Option<S>> + Sync + Send,
         I::IntoIter: ExactSizeIterator + Sync + Send,
     {
-        query::query_txt(&mut self.inner, statement, params).await
+        self.query_raw_txt(statement, params).await
     }

     /// Query for type information
-    async fn get_type(&mut self, oid: Oid) -> Result<Type, Error> {
-        crate::prepare::get_type(&mut self.inner, &mut self.cached_typeinfo, oid).await
+    async fn get_type(&self, oid: Oid) -> Result<Type, Error> {
+        self.get_type(oid).await
     }
 }

@@ -47,18 +48,17 @@ impl private::Sealed for Transaction<'_> {}
 #[async_trait]
 #[allow(clippy::needless_lifetimes)]
 impl GenericClient for Transaction<'_> {
-    async fn query_raw_txt<S, I>(&mut self, statement: &str, params: I) -> Result<RowStream, Error>
+    async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
     where
         S: AsRef<str> + Sync + Send,
         I: IntoIterator<Item = Option<S>> + Sync + Send,
         I::IntoIter: ExactSizeIterator + Sync + Send,
     {
-        query::query_txt(&mut self.client().inner, statement, params).await
+        self.query_raw_txt(statement, params).await
     }

     /// Query for type information
-    async fn get_type(&mut self, oid: Oid) -> Result<Type, Error> {
-        let client = self.client();
-        crate::prepare::get_type(&mut client.inner, &mut client.cached_typeinfo, oid).await
+    async fn get_type(&self, oid: Oid) -> Result<Type, Error> {
+        self.client().get_type(oid).await
     }
 }
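Because both impls now take &self and simply delegate to the inherent methods, downstream code can be written once for either a plain Client or an open Transaction. A small sketch (the fetch_now function is illustrative):

    async fn fetch_now<C: GenericClient>(conn: &C) -> Result<RowStream, Error> {
        // Works unchanged for Client and Transaction<'_>.
        conn.query_raw_txt("SELECT now()", std::iter::empty::<Option<&str>>())
            .await
    }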
@@ -1,5 +1,5 @@
 //! An asynchronous, pipelined, PostgreSQL client.
-#![warn(rust_2018_idioms, clippy::all)]
+#![warn(clippy::all)]

 pub use crate::cancel_token::CancelToken;
 pub use crate::client::{Client, SocketConfig};
@@ -10,10 +10,11 @@ use crate::error::DbError;
 pub use crate::error::Error;
 pub use crate::generic_client::GenericClient;
 pub use crate::query::RowStream;
-pub use crate::row::Row;
+pub use crate::row::{Row, SimpleQueryRow};
+pub use crate::simple_query::SimpleQueryStream;
 pub use crate::statement::{Column, Statement};
 pub use crate::tls::NoTls;
-// pub use crate::to_statement::ToStatement;
+pub use crate::to_statement::ToStatement;
 pub use crate::transaction::Transaction;
 pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder};
 use crate::types::ToSql;
@@ -64,7 +65,7 @@ pub mod row;
 mod simple_query;
 mod statement;
 pub mod tls;
-// mod to_statement;
+mod to_statement;
 mod transaction;
 mod transaction_builder;
 pub mod types;
@@ -97,6 +98,7 @@ impl Notification {
 /// An asynchronous message from the server.
 #[allow(clippy::large_enum_variant)]
 #[derive(Debug, Clone)]
+#[non_exhaustive]
 pub enum AsyncMessage {
     /// A notice.
     ///
@@ -108,6 +110,18 @@ pub enum AsyncMessage {
     Notification(Notification),
 }

+/// Message returned by the `SimpleQuery` stream.
+#[derive(Debug)]
+#[non_exhaustive]
+pub enum SimpleQueryMessage {
+    /// A row of data.
+    Row(SimpleQueryRow),
+    /// A statement in the query has completed.
+    ///
+    /// The number of rows modified or selected is returned.
+    CommandComplete(u64),
+}
+
 fn slice_iter<'a>(
     s: &'a [&'a (dyn ToSql + Sync)],
 ) -> impl ExactSizeIterator<Item = &'a (dyn ToSql + Sync)> + 'a {
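AsyncMessage and the new SimpleQueryMessage are both marked #[non_exhaustive], so matches outside the defining crate need a wildcard arm, which lets variants be added later without a breaking change. An illustrative sketch:

    fn count_rows(messages: &[SimpleQueryMessage]) -> u64 {
        let mut rows = 0;
        for msg in messages {
            match msg {
                SimpleQueryMessage::Row(_) => rows += 1,
                SimpleQueryMessage::CommandComplete(_) => {}
                _ => {} // required outside the defining crate
            }
        }
        rows
    }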
@@ -1,4 +1,4 @@
-use crate::client::{CachedTypeInfo, InnerClient};
+use crate::client::InnerClient;
 use crate::codec::FrontendMessage;
 use crate::connection::RequestMessages;
 use crate::error::SqlState;
@@ -7,13 +7,14 @@ use crate::{query, slice_iter};
 use crate::{Column, Error, Statement};
 use bytes::Bytes;
 use fallible_iterator::FallibleIterator;
-use futures_util::{pin_mut, StreamExt, TryStreamExt};
+use futures_util::{pin_mut, TryStreamExt};
 use log::debug;
 use postgres_protocol2::message::backend::Message;
 use postgres_protocol2::message::frontend;
 use std::future::Future;
-use std::pin::{pin, Pin};
+use std::pin::Pin;
 use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;

 pub(crate) const TYPEINFO_QUERY: &str = "\
 SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid
@@ -58,8 +59,7 @@ ORDER BY attnum
 static NEXT_ID: AtomicUsize = AtomicUsize::new(0);

 pub async fn prepare(
-    client: &mut InnerClient,
-    cache: &mut CachedTypeInfo,
+    client: &Arc<InnerClient>,
     query: &str,
     types: &[Type],
 ) -> Result<Statement, Error> {
@@ -86,7 +86,7 @@ pub async fn prepare(
     let mut parameters = vec![];
     let mut it = parameter_description.parameters();
     while let Some(oid) = it.next().map_err(Error::parse)? {
-        let type_ = get_type(client, cache, oid).await?;
+        let type_ = get_type(client, oid).await?;
         parameters.push(type_);
     }

@@ -94,30 +94,24 @@ pub async fn prepare(
     if let Some(row_description) = row_description {
         let mut it = row_description.fields();
         while let Some(field) = it.next().map_err(Error::parse)? {
-            let type_ = get_type(client, cache, field.type_oid()).await?;
+            let type_ = get_type(client, field.type_oid()).await?;
             let column = Column::new(field.name().to_string(), type_, field);
             columns.push(column);
         }
     }

-    Ok(Statement::new(name, parameters, columns))
+    Ok(Statement::new(client, name, parameters, columns))
 }

 fn prepare_rec<'a>(
-    client: &'a mut InnerClient,
-    cache: &'a mut CachedTypeInfo,
+    client: &'a Arc<InnerClient>,
     query: &'a str,
     types: &'a [Type],
 ) -> Pin<Box<dyn Future<Output = Result<Statement, Error>> + 'a + Send>> {
-    Box::pin(prepare(client, cache, query, types))
+    Box::pin(prepare(client, query, types))
 }

-fn encode(
-    client: &mut InnerClient,
-    name: &str,
-    query: &str,
-    types: &[Type],
-) -> Result<Bytes, Error> {
+fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Result<Bytes, Error> {
     if types.is_empty() {
         debug!("preparing query {}: {}", name, query);
     } else {
@@ -132,20 +126,16 @@ fn encode(
     })
 }

-pub async fn get_type(
-    client: &mut InnerClient,
-    cache: &mut CachedTypeInfo,
-    oid: Oid,
-) -> Result<Type, Error> {
+pub async fn get_type(client: &Arc<InnerClient>, oid: Oid) -> Result<Type, Error> {
     if let Some(type_) = Type::from_oid(oid) {
         return Ok(type_);
     }

-    if let Some(type_) = cache.type_(oid) {
+    if let Some(type_) = client.type_(oid) {
         return Ok(type_);
     }

-    let stmt = typeinfo_statement(client, cache).await?;
+    let stmt = typeinfo_statement(client).await?;

     let rows = query::query(client, stmt, slice_iter(&[&oid])).await?;
     pin_mut!(rows);
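prepare and get_type call each other (preparing the typeinfo statement can itself trigger type lookups), and a recursive async fn has no statically known future size, which is why prepare_rec boxes the future with Box::pin. A generic illustration of the pattern, independent of this crate:

    use std::future::Future;
    use std::pin::Pin;

    // An async fn cannot await itself directly; boxing erases the cycle.
    fn countdown(n: u32) -> Pin<Box<dyn Future<Output = u32> + Send>> {
        Box::pin(async move {
            if n == 0 {
                0
            } else {
                countdown(n - 1).await
            }
        })
    }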
@@ -155,141 +145,118 @@ pub async fn get_type(
         None => return Err(Error::unexpected_message()),
     };

-    let name: String = row.try_get(stmt.columns(), 0)?;
-    let type_: i8 = row.try_get(stmt.columns(), 1)?;
-    let elem_oid: Oid = row.try_get(stmt.columns(), 2)?;
-    let rngsubtype: Option<Oid> = row.try_get(stmt.columns(), 3)?;
-    let basetype: Oid = row.try_get(stmt.columns(), 4)?;
-    let schema: String = row.try_get(stmt.columns(), 5)?;
-    let relid: Oid = row.try_get(stmt.columns(), 6)?;
+    let name: String = row.try_get(0)?;
+    let type_: i8 = row.try_get(1)?;
+    let elem_oid: Oid = row.try_get(2)?;
+    let rngsubtype: Option<Oid> = row.try_get(3)?;
+    let basetype: Oid = row.try_get(4)?;
+    let schema: String = row.try_get(5)?;
+    let relid: Oid = row.try_get(6)?;

     let kind = if type_ == b'e' as i8 {
-        let variants = get_enum_variants(client, cache, oid).await?;
+        let variants = get_enum_variants(client, oid).await?;
         Kind::Enum(variants)
     } else if type_ == b'p' as i8 {
         Kind::Pseudo
     } else if basetype != 0 {
-        let type_ = get_type_rec(client, cache, basetype).await?;
+        let type_ = get_type_rec(client, basetype).await?;
         Kind::Domain(type_)
     } else if elem_oid != 0 {
-        let type_ = get_type_rec(client, cache, elem_oid).await?;
+        let type_ = get_type_rec(client, elem_oid).await?;
         Kind::Array(type_)
     } else if relid != 0 {
-        let fields = get_composite_fields(client, cache, relid).await?;
+        let fields = get_composite_fields(client, relid).await?;
         Kind::Composite(fields)
     } else if let Some(rngsubtype) = rngsubtype {
-        let type_ = get_type_rec(client, cache, rngsubtype).await?;
+        let type_ = get_type_rec(client, rngsubtype).await?;
         Kind::Range(type_)
     } else {
         Kind::Simple
     };

     let type_ = Type::new(name, oid, kind, schema);
-    cache.set_type(oid, &type_);
+    client.set_type(oid, &type_);

     Ok(type_)
 }

 fn get_type_rec<'a>(
-    client: &'a mut InnerClient,
-    cache: &'a mut CachedTypeInfo,
+    client: &'a Arc<InnerClient>,
     oid: Oid,
 ) -> Pin<Box<dyn Future<Output = Result<Type, Error>> + Send + 'a>> {
-    Box::pin(get_type(client, cache, oid))
+    Box::pin(get_type(client, oid))
 }

-async fn typeinfo_statement<'c>(
-    client: &mut InnerClient,
-    cache: &'c mut CachedTypeInfo,
-) -> Result<&'c Statement, Error> {
-    if cache.typeinfo().is_some() {
-        // needed to get around a borrow checker limitation
-        return Ok(cache.typeinfo().unwrap());
+async fn typeinfo_statement(client: &Arc<InnerClient>) -> Result<Statement, Error> {
+    if let Some(stmt) = client.typeinfo() {
+        return Ok(stmt);
     }

-    let stmt = match prepare_rec(client, cache, TYPEINFO_QUERY, &[]).await {
+    let stmt = match prepare_rec(client, TYPEINFO_QUERY, &[]).await {
         Ok(stmt) => stmt,
         Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => {
-            prepare_rec(client, cache, TYPEINFO_FALLBACK_QUERY, &[]).await?
+            prepare_rec(client, TYPEINFO_FALLBACK_QUERY, &[]).await?
         }
         Err(e) => return Err(e),
     };

-    Ok(cache.set_typeinfo(stmt))
+    client.set_typeinfo(&stmt);
+    Ok(stmt)
 }

-async fn get_enum_variants(
-    client: &mut InnerClient,
-    cache: &mut CachedTypeInfo,
-    oid: Oid,
-) -> Result<Vec<String>, Error> {
-    let stmt = typeinfo_enum_statement(client, cache).await?;
+async fn get_enum_variants(client: &Arc<InnerClient>, oid: Oid) -> Result<Vec<String>, Error> {
+    let stmt = typeinfo_enum_statement(client).await?;

-    let mut out = vec![];
-    let mut rows = pin!(query::query(client, stmt, slice_iter(&[&oid])).await?);
-    while let Some(row) = rows.next().await {
-        out.push(row?.try_get(stmt.columns(), 0)?)
-    }
-    Ok(out)
+    query::query(client, stmt, slice_iter(&[&oid]))
+        .await?
+        .and_then(|row| async move { row.try_get(0) })
+        .try_collect()
+        .await
 }

-async fn typeinfo_enum_statement<'c>(
-    client: &mut InnerClient,
-    cache: &'c mut CachedTypeInfo,
-) -> Result<&'c Statement, Error> {
-    if cache.typeinfo_enum().is_some() {
-        // needed to get around a borrow checker limitation
-        return Ok(cache.typeinfo_enum().unwrap());
+async fn typeinfo_enum_statement(client: &Arc<InnerClient>) -> Result<Statement, Error> {
+    if let Some(stmt) = client.typeinfo_enum() {
+        return Ok(stmt);
     }

-    let stmt = match prepare_rec(client, cache, TYPEINFO_ENUM_QUERY, &[]).await {
+    let stmt = match prepare_rec(client, TYPEINFO_ENUM_QUERY, &[]).await {
         Ok(stmt) => stmt,
         Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => {
-            prepare_rec(client, cache, TYPEINFO_ENUM_FALLBACK_QUERY, &[]).await?
+            prepare_rec(client, TYPEINFO_ENUM_FALLBACK_QUERY, &[]).await?
         }
         Err(e) => return Err(e),
     };

-    Ok(cache.set_typeinfo_enum(stmt))
+    client.set_typeinfo_enum(&stmt);
+    Ok(stmt)
 }

-async fn get_composite_fields(
-    client: &mut InnerClient,
-    cache: &mut CachedTypeInfo,
-    oid: Oid,
-) -> Result<Vec<Field>, Error> {
-    let stmt = typeinfo_composite_statement(client, cache).await?;
+async fn get_composite_fields(client: &Arc<InnerClient>, oid: Oid) -> Result<Vec<Field>, Error> {
+    let stmt = typeinfo_composite_statement(client).await?;

-    let mut rows = pin!(query::query(client, stmt, slice_iter(&[&oid])).await?);
-    let mut oids = vec![];
-    while let Some(row) = rows.next().await {
-        let row = row?;
-        let name = row.try_get(stmt.columns(), 0)?;
-        let oid = row.try_get(stmt.columns(), 1)?;
-        oids.push((name, oid));
-    }
+    let rows = query::query(client, stmt, slice_iter(&[&oid]))
+        .await?
+        .try_collect::<Vec<_>>()
+        .await?;

     let mut fields = vec![];
-    for (name, oid) in oids {
-        let type_ = get_type_rec(client, cache, oid).await?;
+    for row in rows {
+        let name = row.try_get(0)?;
+        let oid = row.try_get(1)?;
+        let type_ = get_type_rec(client, oid).await?;
         fields.push(Field::new(name, type_));
     }

     Ok(fields)
 }

-async fn typeinfo_composite_statement<'c>(
-    client: &mut InnerClient,
-    cache: &'c mut CachedTypeInfo,
-) -> Result<&'c Statement, Error> {
-    if cache.typeinfo_composite().is_some() {
-        // needed to get around a borrow checker limitation
-        return Ok(cache.typeinfo_composite().unwrap());
+async fn typeinfo_composite_statement(client: &Arc<InnerClient>) -> Result<Statement, Error> {
+    if let Some(stmt) = client.typeinfo_composite() {
+        return Ok(stmt);
     }

-    let stmt = prepare_rec(client, cache, TYPEINFO_COMPOSITE_QUERY, &[]).await?;
+    let stmt = prepare_rec(client, TYPEINFO_COMPOSITE_QUERY, &[]).await?;

-    Ok(cache.set_typeinfo_composite(stmt))
+    client.set_typeinfo_composite(&stmt);
+    Ok(stmt)
 }
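The cache.* to client.* rewrites above only work because the typeinfo cache now sits behind interior mutability inside InnerClient, with getters that hand back owned clones rather than `&'c Statement` borrows (which also removes the old borrow-checker workaround). A generic sketch of that pattern; the field and type names here are illustrative, not the crate's actual definitions:

    use std::sync::Mutex;

    // Illustrative stand-ins for Statement / CachedTypeInfo.
    #[derive(Clone)]
    struct Stmt(String);

    #[derive(Default)]
    struct Cache {
        typeinfo: Mutex<Option<Stmt>>,
    }

    impl Cache {
        // Getter returns an owned clone, so no borrow of the cache escapes
        // and callers only ever need `&self`.
        fn typeinfo(&self) -> Option<Stmt> {
            self.typeinfo.lock().unwrap().clone()
        }

        fn set_typeinfo(&self, stmt: &Stmt) {
            *self.typeinfo.lock().unwrap() = Some(stmt.clone());
        }
    }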
@@ -14,6 +14,7 @@ use postgres_types2::{Format, ToSql, Type};
 use std::fmt;
 use std::marker::PhantomPinned;
 use std::pin::Pin;
+use std::sync::Arc;
 use std::task::{Context, Poll};

 struct BorrowToSqlParamsDebug<'a>(&'a [&'a (dyn ToSql + Sync)]);
@@ -25,10 +26,10 @@ impl fmt::Debug for BorrowToSqlParamsDebug<'_> {
 }

 pub async fn query<'a, I>(
-    client: &mut InnerClient,
-    statement: &Statement,
+    client: &InnerClient,
+    statement: Statement,
     params: I,
-) -> Result<RawRowStream, Error>
+) -> Result<RowStream, Error>
 where
     I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
     I::IntoIter: ExactSizeIterator,
@@ -40,12 +41,13 @@ where
             statement.name(),
             BorrowToSqlParamsDebug(params.as_slice()),
         );
-        encode(client, statement, params)?
+        encode(client, &statement, params)?
     } else {
-        encode(client, statement, params)?
+        encode(client, &statement, params)?
     };
     let responses = start(client, buf).await?;
-    Ok(RawRowStream {
+    Ok(RowStream {
+        statement,
         responses,
         command_tag: None,
         status: ReadyForQueryStatus::Unknown,
@@ -55,7 +57,7 @@ where
 }

 pub async fn query_txt<S, I>(
-    client: &mut InnerClient,
+    client: &Arc<InnerClient>,
     query: &str,
     params: I,
 ) -> Result<RowStream, Error>
@@ -155,6 +157,49 @@ where
     })
 }

+pub async fn execute<'a, I>(
+    client: &InnerClient,
+    statement: Statement,
+    params: I,
+) -> Result<u64, Error>
+where
+    I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
+    I::IntoIter: ExactSizeIterator,
+{
+    let buf = if log_enabled!(Level::Debug) {
+        let params = params.into_iter().collect::<Vec<_>>();
+        debug!(
+            "executing statement {} with parameters: {:?}",
+            statement.name(),
+            BorrowToSqlParamsDebug(params.as_slice()),
+        );
+        encode(client, &statement, params)?
+    } else {
+        encode(client, &statement, params)?
+    };
+    let mut responses = start(client, buf).await?;
+
+    let mut rows = 0;
+    loop {
+        match responses.next().await? {
+            Message::DataRow(_) => {}
+            Message::CommandComplete(body) => {
+                rows = body
+                    .tag()
+                    .map_err(Error::parse)?
+                    .rsplit(' ')
+                    .next()
+                    .unwrap()
+                    .parse()
+                    .unwrap_or(0);
+            }
+            Message::EmptyQueryResponse => rows = 0,
+            Message::ReadyForQuery(_) => return Ok(rows),
+            _ => return Err(Error::unexpected_message()),
+        }
+    }
+}
+
 async fn start(client: &InnerClient, buf: Bytes) -> Result<Responses, Error> {
     let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?;

@@ -166,11 +211,7 @@ async fn start(client: &InnerClient, buf: Bytes) -> Result<Responses, Error> {
     Ok(responses)
 }

-pub fn encode<'a, I>(
-    client: &mut InnerClient,
-    statement: &Statement,
-    params: I,
-) -> Result<Bytes, Error>
+pub fn encode<'a, I>(client: &InnerClient, statement: &Statement, params: I) -> Result<Bytes, Error>
 where
     I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
     I::IntoIter: ExactSizeIterator,
@@ -255,7 +296,11 @@ impl Stream for RowStream {
         loop {
             match ready!(this.responses.poll_next(cx)?) {
                 Message::DataRow(body) => {
-                    return Poll::Ready(Some(Ok(Row::new(body, *this.output_format)?)))
+                    return Poll::Ready(Some(Ok(Row::new(
+                        this.statement.clone(),
+                        body,
+                        *this.output_format,
+                    )?)))
                 }
                 Message::EmptyQueryResponse | Message::PortalSuspended => {}
                 Message::CommandComplete(body) => {
@@ -293,41 +338,3 @@ impl RowStream {
         self.status
     }
 }
-
-pin_project! {
-    /// A stream of table rows.
-    pub struct RawRowStream {
-        responses: Responses,
-        command_tag: Option<String>,
-        output_format: Format,
-        status: ReadyForQueryStatus,
-        #[pin]
-        _p: PhantomPinned,
-    }
-}
-
-impl Stream for RawRowStream {
-    type Item = Result<Row, Error>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
-        let this = self.project();
-        loop {
-            match ready!(this.responses.poll_next(cx)?) {
-                Message::DataRow(body) => {
-                    return Poll::Ready(Some(Ok(Row::new(body, *this.output_format)?)))
-                }
-                Message::EmptyQueryResponse | Message::PortalSuspended => {}
-                Message::CommandComplete(body) => {
-                    if let Ok(tag) = body.tag() {
-                        *this.command_tag = Some(tag.to_string());
-                    }
-                }
-                Message::ReadyForQuery(status) => {
-                    *this.status = status.into();
-                    return Poll::Ready(None);
-                }
-                _ => return Poll::Ready(Some(Err(Error::unexpected_message()))),
-            }
-        }
-    }
-}
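The new execute() derives the affected-row count from the CommandComplete tag by taking the last space-separated token; rsplit matters because INSERT tags carry an extra OID field before the count. A quick illustration of the same parsing logic (helper and test names are illustrative):

    /// Mirrors the tag parsing in `execute` above.
    fn rows_from_tag(tag: &str) -> u64 {
        tag.rsplit(' ').next().unwrap().parse().unwrap_or(0)
    }

    #[test]
    fn command_tags() {
        assert_eq!(rows_from_tag("INSERT 0 5"), 5); // INSERT tags include an OID field
        assert_eq!(rows_from_tag("UPDATE 3"), 3);
        assert_eq!(rows_from_tag("CREATE TABLE"), 0); // non-numeric -> 0
    }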
@@ -1,16 +1,103 @@
 //! Rows.

+use crate::row::sealed::{AsName, Sealed};
+use crate::simple_query::SimpleColumn;
 use crate::statement::Column;
 use crate::types::{FromSql, Type, WrongType};
-use crate::Error;
+use crate::{Error, Statement};
 use fallible_iterator::FallibleIterator;
 use postgres_protocol2::message::backend::DataRowBody;
 use postgres_types2::{Format, WrongFormat};
 use std::fmt;
 use std::ops::Range;
 use std::str;
+use std::sync::Arc;
+
+mod sealed {
+    pub trait Sealed {}
+
+    pub trait AsName {
+        fn as_name(&self) -> &str;
+    }
+}
+
+impl AsName for Column {
+    fn as_name(&self) -> &str {
+        self.name()
+    }
+}
+
+impl AsName for String {
+    fn as_name(&self) -> &str {
+        self
+    }
+}
+
+/// A trait implemented by types that can index into columns of a row.
+///
+/// This cannot be implemented outside of this crate.
+pub trait RowIndex: Sealed {
+    #[doc(hidden)]
+    fn __idx<T>(&self, columns: &[T]) -> Option<usize>
+    where
+        T: AsName;
+}
+
+impl Sealed for usize {}
+
+impl RowIndex for usize {
+    #[inline]
+    fn __idx<T>(&self, columns: &[T]) -> Option<usize>
+    where
+        T: AsName,
+    {
+        if *self >= columns.len() {
+            None
+        } else {
+            Some(*self)
+        }
+    }
+}
+
+impl Sealed for str {}
+
+impl RowIndex for str {
+    #[inline]
+    fn __idx<T>(&self, columns: &[T]) -> Option<usize>
+    where
+        T: AsName,
+    {
+        if let Some(idx) = columns.iter().position(|d| d.as_name() == self) {
+            return Some(idx);
+        };
+
+        // FIXME ASCII-only case insensitivity isn't really the right thing to
+        // do. Postgres itself uses a dubious wrapper around tolower and JDBC
+        // uses the US locale.
+        columns
+            .iter()
+            .position(|d| d.as_name().eq_ignore_ascii_case(self))
+    }
+}
+
+impl<T> Sealed for &T where T: ?Sized + Sealed {}
+
+impl<T> RowIndex for &T
+where
+    T: ?Sized + RowIndex,
+{
+    #[inline]
+    fn __idx<U>(&self, columns: &[U]) -> Option<usize>
+    where
+        U: AsName,
+    {
+        T::__idx(*self, columns)
+    }
+}

 /// A row of data returned from the database by a query.
 pub struct Row {
+    statement: Statement,
     output_format: Format,
     body: DataRowBody,
     ranges: Vec<Option<Range<usize>>>,
@@ -18,33 +105,80 @@ pub struct Row {

 impl fmt::Debug for Row {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("Row").finish()
+        f.debug_struct("Row")
+            .field("columns", &self.columns())
+            .finish()
     }
 }

 impl Row {
     pub(crate) fn new(
-        // statement: Statement,
+        statement: Statement,
         body: DataRowBody,
         output_format: Format,
     ) -> Result<Row, Error> {
         let ranges = body.ranges().collect().map_err(Error::parse)?;
         Ok(Row {
+            statement,
             body,
             ranges,
             output_format,
         })
     }

-    pub(crate) fn try_get<'a, T>(&'a self, columns: &[Column], idx: usize) -> Result<T, Error>
+    /// Returns information about the columns of data in the row.
+    pub fn columns(&self) -> &[Column] {
+        self.statement.columns()
+    }
+
+    /// Determines if the row contains no values.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns the number of values in the row.
+    pub fn len(&self) -> usize {
+        self.columns().len()
+    }
+
+    /// Deserializes a value from the row.
+    ///
+    /// The value can be specified either by its numeric index in the row, or by its column name.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the index is out of bounds or if the value cannot be converted to the specified type.
+    pub fn get<'a, I, T>(&'a self, idx: I) -> T
     where
+        I: RowIndex + fmt::Display,
         T: FromSql<'a>,
     {
-        let Some(column) = columns.get(idx) else {
-            return Err(Error::column(idx.to_string()));
+        match self.get_inner(&idx) {
+            Ok(ok) => ok,
+            Err(err) => panic!("error retrieving column {}: {}", idx, err),
+        }
+    }
+
+    /// Like `Row::get`, but returns a `Result` rather than panicking.
+    pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result<T, Error>
+    where
+        I: RowIndex + fmt::Display,
+        T: FromSql<'a>,
+    {
+        self.get_inner(&idx)
+    }
+
+    fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result<T, Error>
+    where
+        I: RowIndex + fmt::Display,
+        T: FromSql<'a>,
+    {
+        let idx = match idx.__idx(self.columns()) {
+            Some(idx) => idx,
+            None => return Err(Error::column(idx.to_string())),
         };

-        let ty = column.type_();
+        let ty = self.columns()[idx].type_();
         if !T::accepts(ty) {
             return Err(Error::from_sql(
                 Box::new(WrongType::new::<T>(ty.clone())),
@@ -82,3 +216,85 @@ impl Row {
         self.body.buffer().len()
     }
 }
+
+impl AsName for SimpleColumn {
+    fn as_name(&self) -> &str {
+        self.name()
+    }
+}
+
+/// A row of data returned from the database by a simple query.
+#[derive(Debug)]
+pub struct SimpleQueryRow {
+    columns: Arc<[SimpleColumn]>,
+    body: DataRowBody,
+    ranges: Vec<Option<Range<usize>>>,
+}
+
+impl SimpleQueryRow {
+    #[allow(clippy::new_ret_no_self)]
+    pub(crate) fn new(
+        columns: Arc<[SimpleColumn]>,
+        body: DataRowBody,
+    ) -> Result<SimpleQueryRow, Error> {
+        let ranges = body.ranges().collect().map_err(Error::parse)?;
+        Ok(SimpleQueryRow {
+            columns,
+            body,
+            ranges,
+        })
+    }
+
+    /// Returns information about the columns of data in the row.
+    pub fn columns(&self) -> &[SimpleColumn] {
+        &self.columns
+    }
+
+    /// Determines if the row contains no values.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns the number of values in the row.
+    pub fn len(&self) -> usize {
+        self.columns.len()
+    }
+
+    /// Returns a value from the row.
+    ///
+    /// The value can be specified either by its numeric index in the row, or by its column name.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the index is out of bounds or if the value cannot be converted to the specified type.
+    pub fn get<I>(&self, idx: I) -> Option<&str>
+    where
+        I: RowIndex + fmt::Display,
+    {
+        match self.get_inner(&idx) {
+            Ok(ok) => ok,
+            Err(err) => panic!("error retrieving column {}: {}", idx, err),
+        }
+    }
+
+    /// Like `SimpleQueryRow::get`, but returns a `Result` rather than panicking.
+    pub fn try_get<I>(&self, idx: I) -> Result<Option<&str>, Error>
+    where
+        I: RowIndex + fmt::Display,
+    {
+        self.get_inner(&idx)
+    }
+
+    fn get_inner<I>(&self, idx: &I) -> Result<Option<&str>, Error>
+    where
+        I: RowIndex + fmt::Display,
+    {
+        let idx = match idx.__idx(&self.columns) {
+            Some(idx) => idx,
+            None => return Err(Error::column(idx.to_string())),
+        };

+        let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]);
+        FromSql::from_sql_nullable(&Type::TEXT, buf).map_err(|e| Error::from_sql(e, idx))
+    }
+}
|
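The `SimpleQueryRow` added above decodes every value as `Type::TEXT`, so lookups yield `Option<&str>` rather than typed values, with `None` standing in for SQL NULL. A usage sketch against upstream tokio-postgres, which this code mirrors (hypothetical local database):

```rust
use tokio_postgres::{Error, NoTls, SimpleQueryMessage};

#[tokio::main]
async fn main() -> Result<(), Error> {
    let (client, connection) =
        tokio_postgres::connect("host=localhost user=postgres", NoTls).await?;
    tokio::spawn(connection);

    // The simple protocol is untyped: every value comes back as text.
    for msg in client.simple_query("SELECT '1' AS one, NULL AS two").await? {
        if let SimpleQueryMessage::Row(row) = msg {
            assert_eq!(row.get("one"), Some("1"));
            assert_eq!(row.get("two"), None); // NULL maps to None
        }
    }
    Ok(())
}
```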
libs/proxy/tokio-postgres2/src/simple_query.rs
@@ -1,14 +1,52 @@
-use crate::client::InnerClient;
+use crate::client::{InnerClient, Responses};
 use crate::codec::FrontendMessage;
 use crate::connection::RequestMessages;
-use crate::{Error, ReadyForQueryStatus};
+use crate::{Error, ReadyForQueryStatus, SimpleQueryMessage, SimpleQueryRow};
 use bytes::Bytes;
+use fallible_iterator::FallibleIterator;
+use futures_util::{ready, Stream};
 use log::debug;
+use pin_project_lite::pin_project;
 use postgres_protocol2::message::backend::Message;
 use postgres_protocol2::message::frontend;
+use std::marker::PhantomPinned;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+
+/// Information about a column of a single query row.
+#[derive(Debug)]
+pub struct SimpleColumn {
+    name: String,
+}
+
+impl SimpleColumn {
+    pub(crate) fn new(name: String) -> SimpleColumn {
+        SimpleColumn { name }
+    }
+
+    /// Returns the name of the column.
+    pub fn name(&self) -> &str {
+        &self.name
+    }
+}
+
+pub async fn simple_query(client: &InnerClient, query: &str) -> Result<SimpleQueryStream, Error> {
+    debug!("executing simple query: {}", query);
+
+    let buf = encode(client, query)?;
+    let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?;
+
+    Ok(SimpleQueryStream {
+        responses,
+        columns: None,
+        status: ReadyForQueryStatus::Unknown,
+        _p: PhantomPinned,
+    })
+}
+
 pub async fn batch_execute(
-    client: &mut InnerClient,
+    client: &InnerClient,
     query: &str,
 ) -> Result<ReadyForQueryStatus, Error> {
     debug!("executing statement batch: {}", query);
@@ -28,9 +66,77 @@ pub async fn batch_execute(
     }
 }

-pub(crate) fn encode(client: &mut InnerClient, query: &str) -> Result<Bytes, Error> {
+pub(crate) fn encode(client: &InnerClient, query: &str) -> Result<Bytes, Error> {
     client.with_buf(|buf| {
         frontend::query(query, buf).map_err(Error::encode)?;
         Ok(buf.split().freeze())
     })
 }
+
+pin_project! {
+    /// A stream of simple query results.
+    pub struct SimpleQueryStream {
+        responses: Responses,
+        columns: Option<Arc<[SimpleColumn]>>,
+        status: ReadyForQueryStatus,
+        #[pin]
+        _p: PhantomPinned,
+    }
+}
+
+impl SimpleQueryStream {
+    /// Returns if the connection is ready for querying, with the status of the connection.
+    ///
+    /// This might be available only after the stream has been exhausted.
+    pub fn ready_status(&self) -> ReadyForQueryStatus {
+        self.status
+    }
+}
+
+impl Stream for SimpleQueryStream {
+    type Item = Result<SimpleQueryMessage, Error>;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let this = self.project();
+        loop {
+            match ready!(this.responses.poll_next(cx)?) {
+                Message::CommandComplete(body) => {
+                    let rows = body
+                        .tag()
+                        .map_err(Error::parse)?
+                        .rsplit(' ')
+                        .next()
+                        .unwrap()
+                        .parse()
+                        .unwrap_or(0);
+                    return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows))));
+                }
+                Message::EmptyQueryResponse => {
+                    return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0))));
+                }
+                Message::RowDescription(body) => {
+                    let columns = body
+                        .fields()
+                        .map(|f| Ok(SimpleColumn::new(f.name().to_string())))
+                        .collect::<Vec<_>>()
+                        .map_err(Error::parse)?
+                        .into();
+
+                    *this.columns = Some(columns);
+                }
+                Message::DataRow(body) => {
+                    let row = match &this.columns {
+                        Some(columns) => SimpleQueryRow::new(columns.clone(), body)?,
+                        None => return Poll::Ready(Some(Err(Error::unexpected_message()))),
+                    };
+                    return Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row))));
+                }
+                Message::ReadyForQuery(s) => {
+                    *this.status = s.into();
+                    return Poll::Ready(None);
+                }
+                _ => return Poll::Ready(Some(Err(Error::unexpected_message()))),
+            }
+        }
+    }
+}
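One detail worth calling out in the `CommandComplete` arm above: Postgres command tags look like `SELECT 3` or `INSERT 0 5`, with the affected-row count as the last space-separated token, which is why the code takes `rsplit(' ').next()` and falls back to `0` for tags such as `BEGIN` that carry no count. A standalone sketch of that parsing logic:

```rust
// Mirrors the tag handling in SimpleQueryStream::poll_next above.
fn rows_from_tag(tag: &str) -> u64 {
    tag.rsplit(' ')
        .next()
        .unwrap() // rsplit always yields at least one token
        .parse()
        .unwrap_or(0) // tags without a trailing count parse as 0
}

fn main() {
    assert_eq!(rows_from_tag("SELECT 3"), 3);
    assert_eq!(rows_from_tag("INSERT 0 5"), 5);
    assert_eq!(rows_from_tag("BEGIN"), 0);
}
```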
libs/proxy/tokio-postgres2/src/statement.rs
@@ -1,33 +1,64 @@
+use crate::client::InnerClient;
+use crate::codec::FrontendMessage;
+use crate::connection::RequestMessages;
 use crate::types::Type;
-use postgres_protocol2::{message::backend::Field, Oid};
-use std::fmt;
+use postgres_protocol2::{
+    message::{backend::Field, frontend},
+    Oid,
+};
+use std::{
+    fmt,
+    sync::{Arc, Weak},
+};

 struct StatementInner {
+    client: Weak<InnerClient>,
     name: String,
     params: Vec<Type>,
     columns: Vec<Column>,
 }

+impl Drop for StatementInner {
+    fn drop(&mut self) {
+        if let Some(client) = self.client.upgrade() {
+            let buf = client.with_buf(|buf| {
+                frontend::close(b'S', &self.name, buf).unwrap();
+                frontend::sync(buf);
+                buf.split().freeze()
+            });
+            let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)));
+        }
+    }
+}
+
 /// A prepared statement.
 ///
 /// Prepared statements can only be used with the connection that created them.
-pub struct Statement(StatementInner);
+#[derive(Clone)]
+pub struct Statement(Arc<StatementInner>);

 impl Statement {
-    pub(crate) fn new(name: String, params: Vec<Type>, columns: Vec<Column>) -> Statement {
-        Statement(StatementInner {
+    pub(crate) fn new(
+        inner: &Arc<InnerClient>,
+        name: String,
+        params: Vec<Type>,
+        columns: Vec<Column>,
+    ) -> Statement {
+        Statement(Arc::new(StatementInner {
+            client: Arc::downgrade(inner),
             name,
             params,
             columns,
-        })
+        }))
     }

     pub(crate) fn new_anonymous(params: Vec<Type>, columns: Vec<Column>) -> Statement {
-        Statement(StatementInner {
+        Statement(Arc::new(StatementInner {
+            client: Weak::new(),
             name: String::new(),
             params,
             columns,
-        })
+        }))
     }

     pub(crate) fn name(&self) -> &str {
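The interesting part of this hunk is the ownership pattern: `Statement` becomes a cheaply cloneable `Arc` handle, and `StatementInner` keeps only a `Weak` reference to the client, so a surviving statement cannot keep a dropped client alive; the `Drop` impl sends `Close`/`Sync` only if the upgrade succeeds. A stripped-down, self-contained sketch of that pattern (the names here are illustrative, not from the crate):

```rust
use std::sync::{Arc, Weak};

struct Conn;

impl Conn {
    fn send_close(&self, name: &str) {
        println!("Close statement {name}");
    }
}

struct StatementHandle {
    conn: Weak<Conn>, // does not keep the connection alive
    name: String,
}

impl Drop for StatementHandle {
    fn drop(&mut self) {
        // Only attempt cleanup while the connection still exists,
        // mirroring `self.client.upgrade()` in the hunk above.
        if let Some(conn) = self.conn.upgrade() {
            conn.send_close(&self.name);
        }
    }
}

fn main() {
    let conn = Arc::new(Conn);
    let stmt = StatementHandle { conn: Arc::downgrade(&conn), name: "s0".into() };
    drop(stmt); // prints "Close statement s0"

    let stmt = StatementHandle { conn: Arc::downgrade(&conn), name: "s1".into() };
    drop(conn);
    drop(stmt); // connection already gone: Drop is a no-op
}
```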
libs/proxy/tokio-postgres2/src/to_statement.rs (new file, 57 lines)
@@ -0,0 +1,57 @@
+use crate::to_statement::private::{Sealed, ToStatementType};
+use crate::Statement;
+
+mod private {
+    use crate::{Client, Error, Statement};
+
+    pub trait Sealed {}
+
+    pub enum ToStatementType<'a> {
+        Statement(&'a Statement),
+        Query(&'a str),
+    }
+
+    impl ToStatementType<'_> {
+        pub async fn into_statement(self, client: &Client) -> Result<Statement, Error> {
+            match self {
+                ToStatementType::Statement(s) => Ok(s.clone()),
+                ToStatementType::Query(s) => client.prepare(s).await,
+            }
+        }
+    }
+}
+
+/// A trait abstracting over prepared and unprepared statements.
+///
+/// Many methods are generic over this bound, so that they support both a raw query string as well as a statement which
+/// was prepared previously.
+///
+/// This trait is "sealed" and cannot be implemented by anything outside this crate.
+pub trait ToStatement: Sealed {
+    #[doc(hidden)]
+    fn __convert(&self) -> ToStatementType<'_>;
+}
+
+impl ToStatement for Statement {
+    fn __convert(&self) -> ToStatementType<'_> {
+        ToStatementType::Statement(self)
+    }
+}
+
+impl Sealed for Statement {}
+
+impl ToStatement for str {
+    fn __convert(&self) -> ToStatementType<'_> {
+        ToStatementType::Query(self)
+    }
+}
+
+impl Sealed for str {}
+
+impl ToStatement for String {
+    fn __convert(&self) -> ToStatementType<'_> {
+        ToStatementType::Query(self)
+    }
+}
+
+impl Sealed for String {}
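The net effect of the sealed `ToStatement` trait is that query methods accept either a raw SQL string (prepared on the fly via `into_statement`) or an already prepared `Statement`. A usage sketch in the upstream tokio-postgres style this file mirrors (hypothetical local database):

```rust
use tokio_postgres::{Error, NoTls};

#[tokio::main]
async fn main() -> Result<(), Error> {
    let (client, connection) =
        tokio_postgres::connect("host=localhost user=postgres", NoTls).await?;
    tokio::spawn(connection);

    // &str implements ToStatement: the query is prepared behind the scenes.
    let rows = client.query("SELECT $1::INT4", &[&1i32]).await?;
    assert_eq!(rows[0].get::<_, i32>(0), 1);

    // A prepared Statement implements ToStatement too, so the same method
    // accepts it; `__convert` just hands back the cheap statement handle.
    let stmt = client.prepare("SELECT $1::INT4").await?;
    let rows = client.query(&stmt, &[&2i32]).await?;
    assert_eq!(rows[0].get::<_, i32>(0), 2);
    Ok(())
}
```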
libs/proxy/tokio-postgres2/src/transaction.rs
@@ -1,5 +1,6 @@
 use crate::codec::FrontendMessage;
 use crate::connection::RequestMessages;
+use crate::query::RowStream;
 use crate::{CancelToken, Client, Error, ReadyForQueryStatus};
 use postgres_protocol2::message::frontend;

@@ -18,13 +19,13 @@ impl Drop for Transaction<'_> {
             return;
         }

-        let buf = self.client.inner.with_buf(|buf| {
+        let buf = self.client.inner().with_buf(|buf| {
             frontend::query("ROLLBACK", buf).unwrap();
             buf.split().freeze()
         });
         let _ = self
             .client
-            .inner
+            .inner()
             .send(RequestMessages::Single(FrontendMessage::Raw(buf)));
     }
 }
@@ -51,13 +52,23 @@ impl<'a> Transaction<'a> {
         self.client.batch_execute("ROLLBACK").await
     }

+    /// Like `Client::query_raw_txt`.
+    pub async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
+    where
+        S: AsRef<str>,
+        I: IntoIterator<Item = Option<S>>,
+        I::IntoIter: ExactSizeIterator,
+    {
+        self.client.query_raw_txt(statement, params).await
+    }
+
     /// Like `Client::cancel_token`.
     pub fn cancel_token(&self) -> CancelToken {
         self.client.cancel_token()
     }

     /// Returns a reference to the underlying `Client`.
-    pub fn client(&mut self) -> &mut Client {
+    pub fn client(&self) -> &Client {
         self.client
     }
 }
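Taken together, the two transaction hunks mean a `Transaction` that is dropped without an explicit `commit` triggers the `ROLLBACK` seen in the `Drop` impl above, and `client()` no longer requires exclusive access. A usage sketch in the upstream tokio-postgres style (hypothetical local database and temp table):

```rust
use tokio_postgres::{Error, NoTls};

#[tokio::main]
async fn main() -> Result<(), Error> {
    let (mut client, connection) =
        tokio_postgres::connect("host=localhost user=postgres", NoTls).await?;
    tokio::spawn(connection);

    client.batch_execute("CREATE TEMP TABLE t (id INT4)").await?;

    {
        let tx = client.transaction().await?;
        tx.batch_execute("INSERT INTO t VALUES (1)").await?;
        // No tx.commit(): dropping the transaction here sends ROLLBACK.
    }

    let rows = client.query("SELECT id FROM t", &[]).await?;
    assert!(rows.is_empty()); // the insert was rolled back
    Ok(())
}
```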
libs/remote_storage/Cargo.toml
@@ -18,6 +18,7 @@ camino = { workspace = true, features = ["serde1"] }
 humantime-serde.workspace = true
 hyper = { workspace = true, features = ["client"] }
 futures.workspace = true
+reqwest.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 tokio = { workspace = true, features = ["sync", "fs", "io-util"] }
libs/remote_storage/src/azure.rs
@@ -13,10 +13,12 @@ use std::time::Duration;
 use std::time::SystemTime;

 use super::REMOTE_STORAGE_PREFIX_SEPARATOR;
+use anyhow::Context;
 use anyhow::Result;
 use azure_core::request_options::{IfMatchCondition, MaxResults, Metadata, Range};
+use azure_core::HttpClient;
+use azure_core::TransportOptions;
 use azure_core::{Continuable, RetryOptions};
-use azure_identity::DefaultAzureCredential;
 use azure_storage::StorageCredentials;
 use azure_storage_blobs::blob::CopyStatus;
 use azure_storage_blobs::prelude::ClientBuilder;
@@ -76,12 +78,18 @@ impl AzureBlobStorage {
         let credentials = if let Ok(access_key) = env::var("AZURE_STORAGE_ACCESS_KEY") {
             StorageCredentials::access_key(account.clone(), access_key)
         } else {
-            let token_credential = DefaultAzureCredential::default();
-            StorageCredentials::token_credential(Arc::new(token_credential))
+            let token_credential = azure_identity::create_default_credential()
+                .context("trying to obtain Azure default credentials")?;
+            StorageCredentials::token_credential(token_credential)
         };

-        // we have an outer retry
-        let builder = ClientBuilder::new(account, credentials).retry(RetryOptions::none());
+        let builder = ClientBuilder::new(account, credentials)
+            // we have an outer retry
+            .retry(RetryOptions::none())
+            // Customize transport to configure connection pooling
+            .transport(TransportOptions::new(Self::reqwest_client(
+                azure_config.conn_pool_size,
+            )));

         let client = builder.container_client(azure_config.container_name.to_owned());
@@ -106,6 +114,14 @@ impl AzureBlobStorage {
         })
     }

+    fn reqwest_client(conn_pool_size: usize) -> Arc<dyn HttpClient> {
+        let client = reqwest::ClientBuilder::new()
+            .pool_max_idle_per_host(conn_pool_size)
+            .build()
+            .expect("failed to build `reqwest` client");
+        Arc::new(client)
+    }
+
     pub fn relative_path_to_name(&self, path: &RemotePath) -> String {
         assert_eq!(std::path::MAIN_SEPARATOR, REMOTE_STORAGE_PREFIX_SEPARATOR);
         let path_string = path.get_path().as_str();
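On the `pool_max_idle_per_host` knob used in `reqwest_client` above: with the default `conn_pool_size = 0` the client keeps no idle connections, so pooling is effectively off; a positive value lets up to that many idle connections per host be reused, which matters when issuing very large numbers of requests. A standalone sketch (the builder calls are real `reqwest` API; the sizes are illustrative):

```rust
use std::sync::Arc;

fn main() {
    for conn_pool_size in [0usize, 8] {
        // Same construction as `reqwest_client` in the hunk above.
        let client = reqwest::ClientBuilder::new()
            .pool_max_idle_per_host(conn_pool_size)
            .build()
            .expect("failed to build `reqwest` client");
        // In the real code this is wrapped into the Azure SDK transport.
        let _shared = Arc::new(client);
        println!("built client with pool_max_idle_per_host = {conn_pool_size}");
    }
}
```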
@@ -544,9 +560,9 @@ impl RemoteStorage for AzureBlobStorage {
         .await
     }

-    async fn delete_objects<'a>(
+    async fn delete_objects(
         &self,
-        paths: &'a [RemotePath],
+        paths: &[RemotePath],
         cancel: &CancellationToken,
     ) -> anyhow::Result<()> {
         let kind = RequestKind::Delete;
@@ -624,6 +640,10 @@ impl RemoteStorage for AzureBlobStorage {
         res
     }

+    fn max_keys_per_delete(&self) -> usize {
+        super::MAX_KEYS_PER_DELETE_AZURE
+    }
+
     async fn copy(
         &self,
         from: &RemotePath,
libs/remote_storage/src/config.rs
@@ -114,6 +114,16 @@ fn default_max_keys_per_list_response() -> Option<i32> {
     DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
 }

+fn default_azure_conn_pool_size() -> usize {
+    // Conservative default: no connection pooling. At time of writing this is the Azure
+    // SDK's default as well, due to historic reports of hard-to-reproduce issues
+    // (https://github.com/hyperium/hyper/issues/2312)
+    //
+    // However, using connection pooling is important to avoid exhausting client ports when
+    // doing huge numbers of requests (https://github.com/neondatabase/cloud/issues/20971)
+    0
+}
+
 impl Debug for S3Config {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("S3Config")
@@ -146,6 +156,8 @@ pub struct AzureConfig {
     pub concurrency_limit: NonZeroUsize,
     #[serde(default = "default_max_keys_per_list_response")]
     pub max_keys_per_list_response: Option<i32>,
+    #[serde(default = "default_azure_conn_pool_size")]
+    pub conn_pool_size: usize,
 }

 fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize {
@@ -302,6 +314,7 @@ timeout = '5s'";
 container_region = 'westeurope'
 upload_storage_class = 'INTELLIGENT_TIERING'
 timeout = '7s'
+conn_pool_size = 8
 ";

 let config = parse(toml).unwrap();
@@ -316,6 +329,7 @@ timeout = '5s'";
             prefix_in_container: None,
             concurrency_limit: default_remote_storage_azure_concurrency_limit(),
             max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE,
+            conn_pool_size: 8,
         }),
         timeout: Duration::from_secs(7),
         small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT
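The `#[serde(default = "default_azure_conn_pool_size")]` attribute above is what makes the new field backward compatible: configs that omit `conn_pool_size` deserialize with the conservative default of `0`. A self-contained sketch of that mechanism with a hypothetical minimal struct (requires the `serde` crate with its derive feature plus the `toml` crate):

```rust
use serde::Deserialize;

fn default_conn_pool_size() -> usize {
    0 // mirror the conservative "no pooling" default above
}

#[derive(Debug, Deserialize)]
struct AzureCfg {
    #[serde(default = "default_conn_pool_size")]
    conn_pool_size: usize,
}

fn main() {
    let explicit: AzureCfg = toml::from_str("conn_pool_size = 8").unwrap();
    let omitted: AzureCfg = toml::from_str("").unwrap();
    assert_eq!(explicit.conn_pool_size, 8);
    assert_eq!(omitted.conn_pool_size, 0); // falls back to the default fn
    println!("{explicit:?} / {omitted:?}");
}
```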
Some files were not shown because too many files have changed in this diff.