diff --git a/.dockerignore b/.dockerignore index c7a2f78e32..3c4a748cf7 100644 --- a/.dockerignore +++ b/.dockerignore @@ -13,6 +13,7 @@ # Directories !.cargo/ !.config/ +!compute/ !compute_tools/ !control_plane/ !libs/ diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index c1ec3f207b..a634edb96b 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -120,6 +120,59 @@ jobs: - name: Run mypy to check types run: poetry run mypy . + # Check that the vendor/postgres-* submodules point to the + # corresponding REL_*_STABLE_neon branches. + check-submodules: + runs-on: ubuntu-22.04 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: true + + - uses: dorny/paths-filter@v3 + id: check-if-submodules-changed + with: + filters: | + vendor: + - 'vendor/**' + + - name: Check vendor/postgres-v14 submodule reference + if: steps.check-if-submodules-changed.outputs.vendor == 'true' + uses: jtmullen/submodule-branch-check-action@v1 + with: + path: "vendor/postgres-v14" + fetch_depth: "50" + sub_fetch_depth: "50" + pass_if_unchanged: true + + - name: Check vendor/postgres-v15 submodule reference + if: steps.check-if-submodules-changed.outputs.vendor == 'true' + uses: jtmullen/submodule-branch-check-action@v1 + with: + path: "vendor/postgres-v15" + fetch_depth: "50" + sub_fetch_depth: "50" + pass_if_unchanged: true + + - name: Check vendor/postgres-v16 submodule reference + if: steps.check-if-submodules-changed.outputs.vendor == 'true' + uses: jtmullen/submodule-branch-check-action@v1 + with: + path: "vendor/postgres-v16" + fetch_depth: "50" + sub_fetch_depth: "50" + pass_if_unchanged: true + + - name: Check vendor/postgres-v17 submodule reference + if: steps.check-if-submodules-changed.outputs.vendor == 'true' + uses: jtmullen/submodule-branch-check-action@v1 + with: + path: "vendor/postgres-v17" + fetch_depth: "50" + sub_fetch_depth: "50" + pass_if_unchanged: true + check-codestyle-rust: needs: [ check-permissions, build-build-tools-image ] strategy: @@ -598,7 +651,7 @@ jobs: provenance: false push: true pull: true - file: Dockerfile.compute-node + file: compute/Dockerfile.compute-node cache-from: type=registry,ref=cache.neon.build/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }} cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=cache.neon.build/compute-node-{0}:cache-{1},mode=max', matrix.version, matrix.arch) || '' }} tags: | @@ -617,7 +670,7 @@ jobs: provenance: false push: true pull: true - file: Dockerfile.compute-node + file: compute/Dockerfile.compute-node target: neon-pg-ext-test cache-from: type=registry,ref=cache.neon.build/neon-test-extensions-${{ matrix.version }}:cache-${{ matrix.arch }} cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=cache.neon.build/neon-test-extensions-{0}:cache-{1},mode=max', matrix.version, matrix.arch) || '' }} @@ -638,7 +691,7 @@ jobs: provenance: false push: true pull: true - file: Dockerfile.compute-node + file: compute/Dockerfile.compute-node tags: | neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }} @@ -726,7 +779,7 @@ jobs: - name: Build vm image run: | ./vm-builder \ - -spec=vm-image-spec.yaml \ + -spec=compute/vm-image-spec.yaml \ -src=neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \ -dst=neondatabase/vm-compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} diff --git a/.github/workflows/cloud-regress.yml 
b/.github/workflows/cloud-regress.yml new file mode 100644 index 0000000000..de6babdde3 --- /dev/null +++ b/.github/workflows/cloud-regress.yml @@ -0,0 +1,102 @@ +name: Cloud Regression Test +on: + schedule: + # * is a special character in YAML so you have to quote this string + # ┌───────────── minute (0 - 59) + # │ ┌───────────── hour (0 - 23) + # │ │ ┌───────────── day of the month (1 - 31) + # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) + # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) + - cron: '45 1 * * *' # run once a day, timezone is utc + workflow_dispatch: # adds ability to run this manually + +defaults: + run: + shell: bash -euxo pipefail {0} + +concurrency: + # Allow only one workflow + group: ${{ github.workflow }} + cancel-in-progress: true + +jobs: + regress: + env: + POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install + DEFAULT_PG_VERSION: 16 + TEST_OUTPUT: /tmp/test_output + BUILD_TYPE: remote + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }} + + runs-on: us-east-2 + container: + image: neondatabase/build-tools:pinned + options: --init + + steps: + - uses: actions/checkout@v4 + with: + submodules: true + + - name: Patch the test + run: | + cd "vendor/postgres-v${DEFAULT_PG_VERSION}" + patch -p1 < "../../patches/cloud_regress_pg${DEFAULT_PG_VERSION}.patch" + + - name: Generate a random password + id: pwgen + run: | + set +x + DBPASS=$(dd if=/dev/random bs=48 count=1 2>/dev/null | base64) + echo "::add-mask::${DBPASS//\//}" + echo DBPASS="${DBPASS//\//}" >> "${GITHUB_OUTPUT}" + + - name: Change tests according to the generated password + env: + DBPASS: ${{ steps.pwgen.outputs.DBPASS }} + run: | + cd vendor/postgres-v"${DEFAULT_PG_VERSION}"/src/test/regress + for fname in sql/*.sql expected/*.out; do + sed -i.bak s/NEON_PASSWORD_PLACEHOLDER/"'${DBPASS}'"/ "${fname}" + done + for ph in $(grep NEON_MD5_PLACEHOLDER expected/password.out | awk '{print $3;}' | sort | uniq); do + USER=$(echo "${ph}" | cut -c 22-) + MD5=md5$(echo -n "${DBPASS}${USER}" | md5sum | awk '{print $1;}') + sed -i.bak "s/${ph}/${MD5}/" expected/password.out + done + + - name: Download Neon artifact + uses: ./.github/actions/download + with: + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact + path: /tmp/neon/ + prefix: latest + + - name: Run the regression tests + uses: ./.github/actions/run-python-test-set + with: + build_type: ${{ env.BUILD_TYPE }} + test_selection: cloud_regress + pg_version: ${{ env.DEFAULT_PG_VERSION }} + extra_params: -m remote_cluster + env: + BENCHMARK_CONNSTR: ${{ secrets.PG_REGRESS_CONNSTR }} + + - name: Create Allure report + id: create-allure-report + if: ${{ !cancelled() }} + uses: ./.github/actions/allure-report-generate + + - name: Post to a Slack channel + if: ${{ github.event.schedule && failure() }} + uses: slackapi/slack-github-action@v1 + with: + channel-id: "C033QLM5P7D" # on-call-staging-stream + slack-message: | + Periodic pg_regress on staging: ${{ job.status }} + <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run> + <${{ steps.create-allure-report.outputs.report-url }}|Allure report> + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + diff --git a/.github/workflows/trigger-e2e-tests.yml b/.github/workflows/trigger-e2e-tests.yml index b299cf9b99..f25c1051cd 100644 --- a/.github/workflows/trigger-e2e-tests.yml +++ b/.github/workflows/trigger-e2e-tests.yml @@ -107,7 +107,7 @@ jobs: if [ "$GITHUB_EVENT_NAME" == "pull_request" ]; then 
for f in $(gh api "/repos/${GITHUB_REPOSITORY}/pulls/${PR_NUMBER}/files" --paginate --jq '.[].filename'); do case "$f" in - vendor/*|pgxn/*|libs/vm_monitor/*|Dockerfile.compute-node) + vendor/*|pgxn/*|libs/vm_monitor/*|compute/Dockerfile.compute-node) platforms=$(echo "${platforms}" | jq --compact-output '. += ["k8s-neonvm"] | unique') ;; *) diff --git a/Cargo.lock b/Cargo.lock index 136f07956f..e4dbd8b333 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -255,12 +255,6 @@ dependencies = [ "syn 2.0.52", ] -[[package]] -name = "atomic" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" - [[package]] name = "atomic-take" version = "1.1.0" @@ -295,8 +289,8 @@ dependencies = [ "fastrand 2.0.0", "hex", "http 0.2.9", - "hyper 0.14.26", - "ring 0.17.6", + "hyper 0.14.30", + "ring", "time", "tokio", "tracing", @@ -486,7 +480,7 @@ dependencies = [ "once_cell", "p256 0.11.1", "percent-encoding", - "ring 0.17.6", + "ring", "sha2", "subtle", "time", @@ -593,7 +587,7 @@ dependencies = [ "http 0.2.9", "http-body 0.4.5", "http-body 1.0.0", - "hyper 0.14.26", + "hyper 0.14.30", "hyper-rustls 0.24.0", "once_cell", "pin-project-lite", @@ -684,7 +678,7 @@ dependencies = [ "futures-util", "http 0.2.9", "http-body 0.4.5", - "hyper 0.14.26", + "hyper 0.14.30", "itoa", "matchit 0.7.0", "memchr", @@ -1089,9 +1083,9 @@ dependencies = [ [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -1100,18 +1094,18 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", - "half 1.8.2", + "half", ] [[package]] @@ -1224,7 +1218,7 @@ dependencies = [ "compute_api", "flate2", "futures", - "hyper 0.14.26", + "hyper 0.14.30", "nix 0.27.1", "notify", "num_cpus", @@ -1330,7 +1324,7 @@ dependencies = [ "git-version", "humantime", "humantime-serde", - "hyper 0.14.26", + "hyper 0.14.30", "nix 0.27.1", "once_cell", "pageserver_api", @@ -2304,12 +2298,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "half" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" - [[package]] name = "half" version = "2.4.1" @@ -2411,17 +2399,6 @@ dependencies = [ "digest", ] -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - [[package]] name = "hostname" version = "0.4.0" @@ -2430,7 +2407,7 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" 
dependencies = [ "cfg-if", "libc", - "windows 0.52.0", + "windows", ] [[package]] @@ -2539,9 +2516,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", @@ -2554,7 +2531,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2", "tokio", "tower-service", "tracing", @@ -2589,7 +2566,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" dependencies = [ "http 0.2.9", - "hyper 0.14.26", + "hyper 0.14.30", "log", "rustls 0.21.11", "rustls-native-certs 0.6.2", @@ -2620,7 +2597,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper 0.14.26", + "hyper 0.14.30", "pin-project-lite", "tokio", "tokio-io-timeout", @@ -2639,7 +2616,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.2.0", "pin-project-lite", - "socket2 0.5.5", + "socket2", "tokio", "tower", "tower-service", @@ -2648,16 +2625,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.56" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows 0.48.0", + "windows-core", ] [[package]] @@ -2870,7 +2847,7 @@ dependencies = [ "base64 0.21.1", "js-sys", "pem", - "ring 0.17.6", + "ring", "serde", "serde_json", "simple_asn1", @@ -2908,11 +2885,11 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.5.2", + "spin", ] [[package]] @@ -2974,12 +2951,6 @@ dependencies = [ "hashbrown 0.14.5", ] -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - [[package]] name = "matchers" version = "0.1.0" @@ -3072,15 +3043,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" -dependencies = [ - "autocfg", -] - [[package]] name = "memoffset" version = "0.9.0" @@ -3660,7 +3622,7 @@ dependencies = [ "hex-literal", "humantime", "humantime-serde", - "hyper 0.14.26", + "hyper 0.14.30", "indoc", "itertools 0.10.5", "md5", @@ -3853,7 +3815,7 @@ dependencies = [ "ahash", "bytes", "chrono", - "half 2.4.1", + "half", "hashbrown 0.14.5", "num", "num-bigint", @@ -4140,7 +4102,7 @@ dependencies = [ "crc32c", "env_logger", "log", - "memoffset 0.8.0", + "memoffset 0.9.0", "once_cell", "postgres", "regex", @@ -4350,12 +4312,12 @@ dependencies = [ "hashlink", "hex", "hmac", - "hostname 0.3.1", 
+ "hostname", "http 1.1.0", "http-body-util", "humantime", "humantime-serde", - "hyper 0.14.26", + "hyper 0.14.30", "hyper 1.2.0", "hyper-util", "indexmap 2.0.1", @@ -4400,7 +4362,7 @@ dependencies = [ "signature 2.2.0", "smallvec", "smol_str", - "socket2 0.5.5", + "socket2", "subtle", "thiserror", "tikv-jemalloc-ctl", @@ -4578,7 +4540,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48406db8ac1f3cbc7dcdb56ec355343817958a356ff430259bb07baf7607e1e1" dependencies = [ "pem", - "ring 0.17.6", + "ring", "time", "yasna", ] @@ -4602,7 +4564,7 @@ dependencies = [ "rustls-pki-types", "ryu", "sha1_smol", - "socket2 0.5.5", + "socket2", "tokio", "tokio-rustls 0.25.0", "tokio-util", @@ -4714,7 +4676,7 @@ dependencies = [ "futures-util", "http-types", "humantime-serde", - "hyper 0.14.26", + "hyper 0.14.30", "itertools 0.10.5", "metrics", "once_cell", @@ -4747,7 +4709,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.9", "http-body 0.4.5", - "hyper 0.14.26", + "hyper 0.14.30", "hyper-rustls 0.24.0", "ipnet", "js-sys", @@ -4905,21 +4867,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - [[package]] name = "ring" version = "0.17.6" @@ -4929,8 +4876,8 @@ dependencies = [ "cc", "getrandom 0.2.11", "libc", - "spin 0.9.8", - "untrusted 0.9.0", + "spin", + "untrusted", "windows-sys 0.48.0", ] @@ -4950,7 +4897,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "496c1d3718081c45ba9c31fbfc07417900aa96f4070ff90dc29961836b7a9945" dependencies = [ "http 0.2.9", - "hyper 0.14.26", + "hyper 0.14.30", "lazy_static", "percent-encoding", "regex", @@ -5074,7 +5021,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" dependencies = [ "log", - "ring 0.17.6", + "ring", "rustls-webpki 0.101.7", "sct", ] @@ -5086,7 +5033,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring 0.17.6", + "ring", "rustls-pki-types", "rustls-webpki 0.102.2", "subtle", @@ -5143,24 +5090,14 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" -[[package]] -name = "rustls-webpki" -version = "0.100.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff011474fa39949b7e5c0428f9b4937eda7da7848bbb947786b7be0b27dab" -dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", -] - [[package]] name = "rustls-webpki" version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.6", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -5169,9 +5106,9 @@ version = "0.102.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" dependencies = [ - "ring 0.17.6", + "ring", "rustls-pki-types", - "untrusted 0.9.0", + "untrusted", ] [[package]] @@ -5205,7 +5142,7 @@ dependencies = [ "git-version", "hex", "humantime", - "hyper 0.14.26", + 
"hyper 0.14.30", "metrics", "once_cell", "parking_lot 0.12.1", @@ -5262,11 +5199,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.42.0", + "windows-sys 0.52.0", ] [[package]] @@ -5290,8 +5227,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.6", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -5400,7 +5337,7 @@ version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eba8870c5dba2bfd9db25c75574a11429f6b95957b0a78ac02e2970dd7a5249a" dependencies = [ - "hostname 0.4.0", + "hostname", "libc", "os_info", "rustc_version", @@ -5712,16 +5649,6 @@ dependencies = [ "serde", ] -[[package]] -name = "socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.5" @@ -5732,12 +5659,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -5783,7 +5704,7 @@ dependencies = [ "futures-util", "git-version", "humantime", - "hyper 0.14.26", + "hyper 0.14.30", "metrics", "once_cell", "parking_lot 0.12.1", @@ -5812,7 +5733,7 @@ dependencies = [ "git-version", "hex", "humantime", - "hyper 0.14.26", + "hyper 0.14.30", "itertools 0.10.5", "lasso", "measured", @@ -6228,7 +6149,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2", "tokio-macros", "windows-sys 0.48.0", ] @@ -6288,7 +6209,7 @@ dependencies = [ "pin-project-lite", "postgres-protocol", "postgres-types", - "socket2 0.5.5", + "socket2", "tokio", "tokio-util", ] @@ -6300,7 +6221,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ea13f22eda7127c827983bdaf0d7fff9df21c8817bab02815ac277a21143677" dependencies = [ "futures", - "ring 0.17.6", + "ring", "rustls 0.22.4", "tokio", "tokio-postgres", @@ -6434,7 +6355,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.9", "http-body 0.4.5", - "hyper 0.14.26", + "hyper 0.14.30", "hyper-timeout", "percent-encoding", "pin-project", @@ -6611,7 +6532,7 @@ dependencies = [ name = "tracing-utils" version = "0.1.0" dependencies = [ - "hyper 0.14.26", + "hyper 0.14.30", "opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", @@ -6714,12 +6635,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" @@ -6728,17 +6643,18 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.7.1" +version = "2.9.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b11c96ac7ee530603dcdf68ed1557050f374ce55a5a07193ebf8cbc9f8927e9" +checksum = "d11a831e3c0b56e438a28308e7c810799e3c118417f342d30ecec080105395cd" dependencies = [ - "base64 0.21.1", + "base64 0.22.1", "log", "once_cell", - "rustls 0.21.11", - "rustls-webpki 0.100.2", + "rustls 0.22.4", + "rustls-pki-types", + "rustls-webpki 0.102.2", "url", - "webpki-roots 0.23.1", + "webpki-roots 0.26.1", ] [[package]] @@ -6802,7 +6718,7 @@ dependencies = [ "hex", "hex-literal", "humantime", - "hyper 0.14.26", + "hyper 0.14.30", "jsonwebtoken", "metrics", "nix 0.27.1", @@ -6837,11 +6753,10 @@ dependencies = [ [[package]] name = "uuid" -version = "1.6.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ - "atomic", "getrandom 0.2.11", "serde", ] @@ -7075,15 +6990,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" -dependencies = [ - "rustls-webpki 0.100.2", -] - [[package]] name = "webpki-roots" version = "0.25.2" @@ -7152,15 +7058,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets 0.48.0", -] - [[package]] name = "windows" version = "0.52.0" @@ -7180,21 +7077,6 @@ dependencies = [ "windows-targets 0.52.4", ] -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-sys" version = "0.48.0" @@ -7243,12 +7125,6 @@ dependencies = [ "windows_x86_64_msvc 0.52.4", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" @@ -7261,12 +7137,6 @@ version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - [[package]] name = "windows_aarch64_msvc" version = "0.48.0" @@ -7279,12 +7149,6 @@ version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - [[package]] name = "windows_i686_gnu" version = "0.48.0" @@ -7297,12 +7161,6 @@ version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - [[package]] name = "windows_i686_msvc" version = "0.48.0" @@ -7315,12 +7173,6 @@ version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - [[package]] name = "windows_x86_64_gnu" version = "0.48.0" @@ -7333,12 +7185,6 @@ version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" @@ -7351,12 +7197,6 @@ version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - [[package]] name = "windows_x86_64_msvc" version = "0.48.0" @@ -7433,10 +7273,11 @@ dependencies = [ "futures-util", "generic-array", "getrandom 0.2.11", + "half", "hashbrown 0.14.5", "hex", "hmac", - "hyper 0.14.26", + "hyper 0.14.30", "indexmap 1.9.3", "itertools 0.10.5", "itertools 0.12.1", @@ -7504,7 +7345,7 @@ dependencies = [ "der 0.7.8", "hex", "pem", - "ring 0.17.6", + "ring", "signature 2.2.0", "spki 0.7.3", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index fd1d4e016c..a788dcf3cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,8 +76,6 @@ clap = { version = "4.0", features = ["derive"] } comfy-table = "7.1" const_format = "0.2" crc32c = "0.6" -crossbeam-deque = "0.8.5" -crossbeam-utils = "0.8.5" dashmap = { version = "5.5.0", features = ["raw-api"] } either = "1.8" enum-map = "2.4.2" @@ -95,7 +93,7 @@ hdrhistogram = "7.5.2" hex = "0.4" hex-literal = "0.4" hmac = "0.12.1" -hostname = "0.3.1" +hostname = "0.4" http = {version = "1.1.0", features = ["std"]} http-types = { version = "2", default-features = false } humantime = "2.1" @@ -104,7 +102,6 @@ hyper = "0.14" tokio-tungstenite = "0.20.0" indexmap = "2" indoc = "2" -inotify = "0.10.2" ipnet = "2.9.0" itertools = "0.10" jsonwebtoken = "9" @@ -113,7 +110,7 @@ libc = "0.2" md5 = "0.7.0" measured = { version = "0.0.22", features=["lasso"] } measured-process = { version = "0.0.22" } -memoffset = "0.8" +memoffset = "0.9" nix = { version = "0.27", features = ["dir", "fs", "process", "socket", "signal", "poll"] } notify = "6.0.0" num_cpus = "1.15" @@ -142,7 +139,6 @@ rpds = "0.13" rustc-hash = "1.1.0" rustls = "0.22" rustls-pemfile = "2" -rustls-split = "0.3" scopeguard 
= "1.1" sysinfo = "0.29.2" sd-notify = "0.4.1" @@ -164,7 +160,6 @@ strum_macros = "0.26" svg_fmt = "0.4.3" sync_wrapper = "0.1.2" tar = "0.4" -task-local-extensions = "0.1.4" test-context = "0.3" thiserror = "1.0" tikv-jemallocator = "0.5" diff --git a/Dockerfile.compute-node b/compute/Dockerfile.compute-node similarity index 96% rename from Dockerfile.compute-node rename to compute/Dockerfile.compute-node index 6bf6fb650f..18c68c116a 100644 --- a/Dockerfile.compute-node +++ b/compute/Dockerfile.compute-node @@ -280,7 +280,7 @@ FROM build-deps AS vector-pg-build ARG PG_VERSION COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ -COPY patches/pgvector.patch /pgvector.patch +COPY compute/patches/pgvector.patch /pgvector.patch # By default, pgvector Makefile uses `-march=native`. We don't want that, # because we build the images on different machines than where we run them. @@ -366,7 +366,7 @@ FROM build-deps AS rum-pg-build ARG PG_VERSION COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ -COPY patches/rum.patch /rum.patch +COPY compute/patches/rum.patch /rum.patch RUN case "${PG_VERSION}" in "v17") \ echo "v17 extensions are not supported yet. Quit" && exit 0;; \ @@ -1031,6 +1031,41 @@ FROM debian:bullseye-slim AS compute-tools-image COPY --from=compute-tools /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl +######################################################################################### +# +# Layer "pgbouncer" +# +######################################################################################### + +FROM debian:bullseye-slim AS pgbouncer +RUN set -e \ + && apt-get update \ + && apt-get install -y \ + build-essential \ + git \ + libevent-dev \ + libtool \ + pkg-config + +# Use `dist_man_MANS=` to skip manpage generation (which requires python3/pandoc) +ENV PGBOUNCER_TAG=pgbouncer_1_22_1 +RUN set -e \ + && git clone --recurse-submodules --depth 1 --branch ${PGBOUNCER_TAG} https://github.com/pgbouncer/pgbouncer.git pgbouncer \ + && cd pgbouncer \ + && ./autogen.sh \ + && LDFLAGS=-static ./configure --prefix=/usr/local/pgbouncer --without-openssl \ + && make -j $(nproc) dist_man_MANS= \ + && make install dist_man_MANS= + +######################################################################################### +# +# Layers "postgres-exporter" and "sql-exporter" +# +######################################################################################### + +FROM quay.io/prometheuscommunity/postgres-exporter:v0.12.1 AS postgres-exporter +FROM burningalchemist/sql_exporter:0.13 AS sql-exporter + ######################################################################################### # # Clean up postgres folder before inclusion @@ -1078,7 +1113,7 @@ COPY --from=pgjwt-pg-build /pgjwt.tar.gz /ext-src COPY --from=hypopg-pg-build /hypopg.tar.gz /ext-src COPY --from=pg-hashids-pg-build /pg_hashids.tar.gz /ext-src COPY --from=rum-pg-build /rum.tar.gz /ext-src -COPY patches/rum.patch /ext-src +COPY compute/patches/rum.patch /ext-src #COPY --from=pgtap-pg-build /pgtap.tar.gz /ext-src COPY --from=ip4r-pg-build /ip4r.tar.gz /ext-src COPY --from=prefix-pg-build /prefix.tar.gz /ext-src @@ -1086,9 +1121,9 @@ COPY --from=hll-pg-build /hll.tar.gz /ext-src COPY --from=plpgsql-check-pg-build /plpgsql_check.tar.gz /ext-src #COPY --from=timescaledb-pg-build /timescaledb.tar.gz /ext-src COPY --from=pg-hint-plan-pg-build /pg_hint_plan.tar.gz /ext-src -COPY patches/pg_hint_plan.patch /ext-src +COPY compute/patches/pg_hint_plan.patch /ext-src COPY 
--from=pg-cron-pg-build /pg_cron.tar.gz /ext-src -COPY patches/pg_cron.patch /ext-src +COPY compute/patches/pg_cron.patch /ext-src #COPY --from=pg-pgx-ulid-build /home/nonroot/pgx_ulid.tar.gz /ext-src #COPY --from=rdkit-pg-build /rdkit.tar.gz /ext-src COPY --from=pg-uuidv7-pg-build /pg_uuidv7.tar.gz /ext-src @@ -1097,7 +1132,7 @@ COPY --from=pg-semver-pg-build /pg_semver.tar.gz /ext-src #COPY --from=pg-embedding-pg-build /home/nonroot/pg_embedding-src/ /ext-src #COPY --from=wal2json-pg-build /wal2json_2_5.tar.gz /ext-src COPY --from=pg-anon-pg-build /pg_anon.tar.gz /ext-src -COPY patches/pg_anon.patch /ext-src +COPY compute/patches/pg_anon.patch /ext-src COPY --from=pg-ivm-build /pg_ivm.tar.gz /ext-src COPY --from=pg-partman-build /pg_partman.tar.gz /ext-src RUN case "${PG_VERSION}" in "v17") \ @@ -1160,9 +1195,23 @@ RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \ COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl +# pgbouncer and its config +COPY --from=pgbouncer /usr/local/pgbouncer/bin/pgbouncer /usr/local/bin/pgbouncer +COPY --chmod=0666 --chown=postgres compute/etc/pgbouncer.ini /etc/pgbouncer.ini + +# Metrics exporter binaries and configuration files +COPY --from=postgres-exporter /bin/postgres_exporter /bin/postgres_exporter +COPY --from=sql-exporter /bin/sql_exporter /bin/sql_exporter + +COPY --chmod=0644 compute/etc/sql_exporter.yml /etc/sql_exporter.yml +COPY --chmod=0644 compute/etc/neon_collector.yml /etc/neon_collector.yml +COPY --chmod=0644 compute/etc/sql_exporter_autoscaling.yml /etc/sql_exporter_autoscaling.yml +COPY --chmod=0644 compute/etc/neon_collector_autoscaling.yml /etc/neon_collector_autoscaling.yml + # Create remote extension download directory RUN mkdir /usr/local/download_extensions && chown -R postgres:postgres /usr/local/download_extensions + # Install: # libreadline8 for psql # libicu67, locales for collations (including ICU and plpgsql_check) diff --git a/compute/README.md b/compute/README.md new file mode 100644 index 0000000000..bb1e42ab53 --- /dev/null +++ b/compute/README.md @@ -0,0 +1,21 @@ +This directory contains files that are needed to build the compute +images, or that are included in the compute images. + +Dockerfile.compute-node + To build the compute image + +vm-image-spec.yaml + Instructions for vm-builder, to turn the compute-node image into + the corresponding vm-compute-node image. + +etc/ + Configuration files included in /etc in the compute image + +patches/ + Some extensions need to be patched to work with Neon. This + directory contains such patches. They are applied to the extension + sources in Dockerfile.compute-node. + +In addition to these, postgres itself, the neon postgres extension, +and compute_ctl are built and copied into the compute image by +Dockerfile.compute-node.
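+As a rough sketch (assuming a local Docker daemon; the tag name below is only an example), the +compute image can be built from the repository root, which is the build context these files assume: + + docker build -f compute/Dockerfile.compute-node --build-arg PG_VERSION=v16 -t compute-node-v16 . + +CI builds one such image per supported Postgres version; see .github/workflows/build_and_test.yml.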
diff --git a/compute/etc/neon_collector.yml b/compute/etc/neon_collector.yml new file mode 100644 index 0000000000..29be0958dd --- /dev/null +++ b/compute/etc/neon_collector.yml @@ -0,0 +1,247 @@ +collector_name: neon_collector +metrics: +- metric_name: lfc_misses + type: gauge + help: 'lfc_misses' + key_labels: + values: [lfc_misses] + query: | + select lfc_value as lfc_misses from neon.neon_lfc_stats where lfc_key='file_cache_misses'; + +- metric_name: lfc_used + type: gauge + help: 'LFC chunks used (chunk = 1MB)' + key_labels: + values: [lfc_used] + query: | + select lfc_value as lfc_used from neon.neon_lfc_stats where lfc_key='file_cache_used'; + +- metric_name: lfc_hits + type: gauge + help: 'lfc_hits' + key_labels: + values: [lfc_hits] + query: | + select lfc_value as lfc_hits from neon.neon_lfc_stats where lfc_key='file_cache_hits'; + +- metric_name: lfc_writes + type: gauge + help: 'lfc_writes' + key_labels: + values: [lfc_writes] + query: | + select lfc_value as lfc_writes from neon.neon_lfc_stats where lfc_key='file_cache_writes'; + +- metric_name: lfc_cache_size_limit + type: gauge + help: 'LFC cache size limit in bytes' + key_labels: + values: [lfc_cache_size_limit] + query: | + select pg_size_bytes(current_setting('neon.file_cache_size_limit')) as lfc_cache_size_limit; + +- metric_name: connection_counts + type: gauge + help: 'Connection counts' + key_labels: + - datname + - state + values: [count] + query: | + select datname, state, count(*) as count from pg_stat_activity where state <> '' group by datname, state; + +- metric_name: pg_stats_userdb + type: gauge + help: 'Stats for several oldest non-system dbs' + key_labels: + - datname + value_label: kind + values: + - db_size + - deadlocks + # Rows + - inserted + - updated + - deleted + # We export stats for 10 non-system databases. Without this limit + # it is too easy to abuse the system by creating lots of databases. + query: | + select pg_database_size(datname) as db_size, deadlocks, + tup_inserted as inserted, tup_updated as updated, tup_deleted as deleted, + datname + from pg_stat_database + where datname IN ( + select datname + from pg_database + where datname <> 'postgres' and not datistemplate + order by oid + limit 10 + ); + +- metric_name: max_cluster_size + type: gauge + help: 'neon.max_cluster_size setting' + key_labels: + values: [max_cluster_size] + query: | + select setting::int as max_cluster_size from pg_settings where name = 'neon.max_cluster_size'; + +- metric_name: db_total_size + type: gauge + help: 'Size of all databases' + key_labels: + values: [total] + query: | + select sum(pg_database_size(datname)) as total from pg_database; + +# DEPRECATED +- metric_name: lfc_approximate_working_set_size + type: gauge + help: 'Approximate working set size in pages of 8192 bytes' + key_labels: + values: [approximate_working_set_size] + query: | + select neon.approximate_working_set_size(false) as approximate_working_set_size; + +- metric_name: lfc_approximate_working_set_size_windows + type: gauge + help: 'Approximate working set size in pages of 8192 bytes' + key_labels: [duration] + values: [size] + # NOTE: This is the "public" / "human-readable" version. Here, we supply a small selection + # of durations in a pretty-printed form.
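+ # For example, a scrape could return size=1536 for duration='15m', i.e. an estimated working set + # of 1536 pages * 8192 bytes = 12 MiB over the last 15 minutes (the figures here are illustrative only).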
+ query: | + select + x as duration, + neon.approximate_working_set_size_seconds(extract('epoch' from x::interval)::int) as size + from + (values ('5m'),('15m'),('1h')) as t (x); + +- metric_name: compute_current_lsn + type: gauge + help: 'Current LSN of the database' + key_labels: + values: [lsn] + query: | + select + case + when pg_catalog.pg_is_in_recovery() + then (pg_last_wal_replay_lsn() - '0/0')::FLOAT8 + else (pg_current_wal_lsn() - '0/0')::FLOAT8 + end as lsn; + +- metric_name: compute_receive_lsn + type: gauge + help: 'Returns the last write-ahead log location that has been received and synced to disk by streaming replication' + key_labels: + values: [lsn] + query: | + SELECT + CASE + WHEN pg_catalog.pg_is_in_recovery() + THEN (pg_last_wal_receive_lsn() - '0/0')::FLOAT8 + ELSE 0 + END AS lsn; + +- metric_name: replication_delay_bytes + type: gauge + help: 'Bytes between received and replayed LSN' + key_labels: + values: [replication_delay_bytes] + # We use a GREATEST call here because this calculation can be negative. + # The calculation is not atomic, meaning after we've gotten the receive + # LSN, the replay LSN may have advanced past the receive LSN we + # are using for the calculation. + query: | + SELECT GREATEST(0, pg_wal_lsn_diff(pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn())) AS replication_delay_bytes; + +- metric_name: replication_delay_seconds + type: gauge + help: 'Time since last LSN was replayed' + key_labels: + values: [replication_delay_seconds] + query: | + SELECT + CASE + WHEN pg_last_wal_receive_lsn() = pg_last_wal_replay_lsn() THEN 0 + ELSE GREATEST (0, EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp())) + END AS replication_delay_seconds; + +- metric_name: checkpoints_req + type: gauge + help: 'Number of requested checkpoints' + key_labels: + values: [checkpoints_req] + query: | + SELECT checkpoints_req FROM pg_stat_bgwriter; + +- metric_name: checkpoints_timed + type: gauge + help: 'Number of scheduled checkpoints' + key_labels: + values: [checkpoints_timed] + query: | + SELECT checkpoints_timed FROM pg_stat_bgwriter; + +- metric_name: compute_logical_snapshot_files + type: gauge + help: 'Number of snapshot files in pg_logical/snapshot' + key_labels: + - timeline_id + values: [num_logical_snapshot_files] + query: | + SELECT + (SELECT setting FROM pg_settings WHERE name = 'neon.timeline_id') AS timeline_id, + -- Postgres creates temporary snapshot files of the form %X-%X.snap.%d.tmp. These + -- temporary snapshot files are renamed to the actual snapshot files after they are + -- completely built. We only WAL-log the completely built snapshot files. + (SELECT COUNT(*) FROM pg_ls_logicalsnapdir() WHERE name LIKE '%.snap') AS num_logical_snapshot_files; + +# In all the below metrics, we cast LSNs to floats because Prometheus only supports floats. +# It's probably fine because float64 can store integers from -2^53 to +2^53 exactly. + +# Number of slots is limited by max_replication_slots, so collecting position for all of them shouldn't be bad. 
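+# As a worked example (values illustrative): the LSN '0/16B3748' is byte position 0x16B3748 = 23803720 +# in the WAL stream, so (restart_lsn - '0/0')::FLOAT8 yields exactly 23803720.0. Exactness is only lost +# beyond 2^53 bytes, i.e. roughly 8 PiB of WAL.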
+- metric_name: logical_slot_restart_lsn + type: gauge + help: 'restart_lsn of logical slots' + key_labels: + - slot_name + values: [restart_lsn] + query: | + select slot_name, (restart_lsn - '0/0')::FLOAT8 as restart_lsn + from pg_replication_slots + where slot_type = 'logical'; + +- metric_name: compute_subscriptions_count + type: gauge + help: 'Number of logical replication subscriptions grouped by enabled/disabled' + key_labels: + - enabled + values: [subscriptions_count] + query: | + select subenabled::text as enabled, count(*) as subscriptions_count + from pg_subscription + group by subenabled; + +- metric_name: retained_wal + type: gauge + help: 'Retained WAL in inactive replication slots' + key_labels: + - slot_name + values: [retained_wal] + query: | + SELECT slot_name, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)::FLOAT8 AS retained_wal + FROM pg_replication_slots + WHERE active = false; + +- metric_name: wal_is_lost + type: gauge + help: 'Whether or not the replication slot wal_status is lost' + key_labels: + - slot_name + values: [wal_is_lost] + query: | + SELECT slot_name, + CASE WHEN wal_status = 'lost' THEN 1 ELSE 0 END AS wal_is_lost + FROM pg_replication_slots; + diff --git a/compute/etc/neon_collector_autoscaling.yml b/compute/etc/neon_collector_autoscaling.yml new file mode 100644 index 0000000000..5616264eba --- /dev/null +++ b/compute/etc/neon_collector_autoscaling.yml @@ -0,0 +1,55 @@ +collector_name: neon_collector_autoscaling +metrics: +- metric_name: lfc_misses + type: gauge + help: 'lfc_misses' + key_labels: + values: [lfc_misses] + query: | + select lfc_value as lfc_misses from neon.neon_lfc_stats where lfc_key='file_cache_misses'; + +- metric_name: lfc_used + type: gauge + help: 'LFC chunks used (chunk = 1MB)' + key_labels: + values: [lfc_used] + query: | + select lfc_value as lfc_used from neon.neon_lfc_stats where lfc_key='file_cache_used'; + +- metric_name: lfc_hits + type: gauge + help: 'lfc_hits' + key_labels: + values: [lfc_hits] + query: | + select lfc_value as lfc_hits from neon.neon_lfc_stats where lfc_key='file_cache_hits'; + +- metric_name: lfc_writes + type: gauge + help: 'lfc_writes' + key_labels: + values: [lfc_writes] + query: | + select lfc_value as lfc_writes from neon.neon_lfc_stats where lfc_key='file_cache_writes'; + +- metric_name: lfc_cache_size_limit + type: gauge + help: 'LFC cache size limit in bytes' + key_labels: + values: [lfc_cache_size_limit] + query: | + select pg_size_bytes(current_setting('neon.file_cache_size_limit')) as lfc_cache_size_limit; + +- metric_name: lfc_approximate_working_set_size_windows + type: gauge + help: 'Approximate working set size in pages of 8192 bytes' + key_labels: [duration_seconds] + values: [size] + # NOTE: This is the "internal" / "machine-readable" version. This outputs the working set + # size looking back 1..60 minutes, labeled with the number of minutes. 
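+ # (The duration_seconds label value itself is expressed in seconds, so the windows appear as 60, 120, ..., 3600.)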
+ query: | + select + x::text as duration_seconds, + neon.approximate_working_set_size_seconds(x) as size + from + (select generate_series * 60 as x from generate_series(1, 60)) as t (x); diff --git a/compute/etc/pgbouncer.ini b/compute/etc/pgbouncer.ini new file mode 100644 index 0000000000..cb994f961c --- /dev/null +++ b/compute/etc/pgbouncer.ini @@ -0,0 +1,17 @@ +[databases] +*=host=localhost port=5432 auth_user=cloud_admin +[pgbouncer] +listen_port=6432 +listen_addr=0.0.0.0 +auth_type=scram-sha-256 +auth_user=cloud_admin +auth_dbname=postgres +client_tls_sslmode=disable +server_tls_sslmode=disable +pool_mode=transaction +max_client_conn=10000 +default_pool_size=64 +max_prepared_statements=0 +admin_users=postgres +unix_socket_dir=/tmp/ +unix_socket_mode=0777 diff --git a/compute/etc/sql_exporter.yml b/compute/etc/sql_exporter.yml new file mode 100644 index 0000000000..139d04468a --- /dev/null +++ b/compute/etc/sql_exporter.yml @@ -0,0 +1,33 @@ +# Configuration for sql_exporter +# Global defaults. +global: + # If scrape_timeout <= 0, no timeout is set unless Prometheus provides one. The default is 10s. + scrape_timeout: 10s + # Subtracted from Prometheus' scrape_timeout to give us some headroom and prevent Prometheus from timing out first. + scrape_timeout_offset: 500ms + # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. + min_interval: 0s + # Maximum number of open connections to any one target. Metric queries will run concurrently on multiple connections, + # as will concurrent scrapes. + max_connections: 1 + # Maximum number of idle connections to any one target. Unless you use very long collection intervals, this should + # always be the same as max_connections. + max_idle_connections: 1 + # Maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. + # If 0, connections are not closed due to a connection's age. + max_connection_lifetime: 5m + +# The target to monitor and the collectors to execute on it. +target: + # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) + # the schema gets dropped or replaced to match the driver expected DSN format. + data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter' + + # Collectors (referenced by name) to execute on the target. + # Glob patterns are supported (see for syntax). + collectors: [neon_collector] + +# collector_files specifies a list of globs. One collector definition is read from each matching file. +# Glob patterns are supported (see for syntax). +collector_files: + - "neon_collector.yml" diff --git a/compute/etc/sql_exporter_autoscaling.yml b/compute/etc/sql_exporter_autoscaling.yml new file mode 100644 index 0000000000..044557233e --- /dev/null +++ b/compute/etc/sql_exporter_autoscaling.yml @@ -0,0 +1,33 @@ +# Configuration for sql_exporter for autoscaling-agent +# Global defaults. +global: + # If scrape_timeout <= 0, no timeout is set unless Prometheus provides one. The default is 10s. + scrape_timeout: 10s + # Subtracted from Prometheus' scrape_timeout to give us some headroom and prevent Prometheus from timing out first. + scrape_timeout_offset: 500ms + # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. + min_interval: 0s + # Maximum number of open connections to any one target.
Metric queries will run concurrently on multiple connections, + # as will concurrent scrapes. + max_connections: 1 + # Maximum number of idle connections to any one target. Unless you use very long collection intervals, this should + # always be the same as max_connections. + max_idle_connections: 1 + # Maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. + # If 0, connections are not closed due to a connection's age. + max_connection_lifetime: 5m + +# The target to monitor and the collectors to execute on it. +target: + # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) + # the schema gets dropped or replaced to match the driver expected DSN format. + data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter_autoscaling' + + # Collectors (referenced by name) to execute on the target. + # Glob patterns are supported (see for syntax). + collectors: [neon_collector_autoscaling] + +# collector_files specifies a list of globs. One collector definition is read from each matching file. +# Glob patterns are supported (see for syntax). +collector_files: + - "neon_collector_autoscaling.yml" diff --git a/patches/pg_anon.patch b/compute/patches/pg_anon.patch similarity index 100% rename from patches/pg_anon.patch rename to compute/patches/pg_anon.patch diff --git a/patches/pg_cron.patch b/compute/patches/pg_cron.patch similarity index 100% rename from patches/pg_cron.patch rename to compute/patches/pg_cron.patch diff --git a/patches/pg_hint_plan.patch b/compute/patches/pg_hint_plan.patch similarity index 100% rename from patches/pg_hint_plan.patch rename to compute/patches/pg_hint_plan.patch diff --git a/patches/pgvector.patch b/compute/patches/pgvector.patch similarity index 100% rename from patches/pgvector.patch rename to compute/patches/pgvector.patch diff --git a/patches/rum.patch b/compute/patches/rum.patch similarity index 100% rename from patches/rum.patch rename to compute/patches/rum.patch diff --git a/compute/vm-image-spec.yaml b/compute/vm-image-spec.yaml new file mode 100644 index 0000000000..0af44745e5 --- /dev/null +++ b/compute/vm-image-spec.yaml @@ -0,0 +1,112 @@ +# Supplemental file for neondatabase/autoscaling's vm-builder, for producing the VM compute image. +--- +commands: + - name: cgconfigparser + user: root + sysvInitAction: sysinit + shell: 'cgconfigparser -l /etc/cgconfig.conf -s 1664' + # restrict permissions on /neonvm/bin/resize-swap, because we grant access to compute_ctl for + # running it as root.
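+ # Mode 0711 keeps the root-owned binary executable by everyone, but readable and writable by root only.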
+ - name: chmod-resize-swap + user: root + sysvInitAction: sysinit + shell: 'chmod 711 /neonvm/bin/resize-swap' + - name: pgbouncer + user: postgres + sysvInitAction: respawn + shell: '/usr/local/bin/pgbouncer /etc/pgbouncer.ini' + - name: postgres-exporter + user: nobody + sysvInitAction: respawn + shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter' + - name: sql-exporter + user: nobody + sysvInitAction: respawn + shell: '/bin/sql_exporter -config.file=/etc/sql_exporter.yml -web.listen-address=:9399' + - name: sql-exporter-autoscaling + user: nobody + sysvInitAction: respawn + shell: '/bin/sql_exporter -config.file=/etc/sql_exporter_autoscaling.yml -web.listen-address=:9499' +shutdownHook: | + su -p postgres --session-command '/usr/local/bin/pg_ctl stop -D /var/db/postgres/compute/pgdata -m fast --wait -t 10' +files: + - filename: compute_ctl-resize-swap + content: | + # Allow postgres user (which is what compute_ctl runs as) to run /neonvm/bin/resize-swap + # as root without requiring entering a password (NOPASSWD), regardless of hostname (ALL) + postgres ALL=(root) NOPASSWD: /neonvm/bin/resize-swap + - filename: cgconfig.conf + content: | + # Configuration for cgroups in VM compute nodes + group neon-postgres { + perm { + admin { + uid = postgres; + } + task { + gid = users; + } + } + memory {} + } +build: | + # Build cgroup-tools + # + # At time of writing (2023-03-14), debian bullseye has a version of cgroup-tools (technically + # libcgroup) that doesn't support cgroup v2 (version 0.41-11). Unfortunately, the vm-monitor + # requires cgroup v2, so we'll build cgroup-tools ourselves. + FROM debian:bullseye-slim as libcgroup-builder + ENV LIBCGROUP_VERSION=v2.0.3 + + RUN set -exu \ + && apt update \ + && apt install --no-install-recommends -y \ + git \ + ca-certificates \ + automake \ + cmake \ + make \ + gcc \ + byacc \ + flex \ + libtool \ + libpam0g-dev \ + && git clone --depth 1 -b $LIBCGROUP_VERSION https://github.com/libcgroup/libcgroup \ + && INSTALL_DIR="/libcgroup-install" \ + && mkdir -p "$INSTALL_DIR/bin" "$INSTALL_DIR/include" \ + && cd libcgroup \ + # extracted from bootstrap.sh, with modified flags: + && (test -d m4 || mkdir m4) \ + && autoreconf -fi \ + && rm -rf autom4te.cache \ + && CFLAGS="-O3" ./configure --prefix="$INSTALL_DIR" --sysconfdir=/etc --localstatedir=/var --enable-opaque-hierarchy="name=systemd" \ + # actually build the thing... + && make install +merge: | + # tweak nofile limits + RUN set -e \ + && echo 'fs.file-max = 1048576' >>/etc/sysctl.conf \ + && test ! -e /etc/security || ( \ + echo '* - nofile 1048576' >>/etc/security/limits.conf \ + && echo 'root - nofile 1048576' >>/etc/security/limits.conf \ + ) + + # Allow postgres user (compute_ctl) to run swap resizer. + # Need to install sudo in order to allow this. + # + # Also, remove the 'read' permission from group/other on /neonvm/bin/resize-swap, just to be safe. 
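+ # (The read bit is dropped by the chmod-resize-swap sysinit command defined earlier in this spec, + # which sets mode 0711.)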
+ RUN set -e + && apt update + && apt install --no-install-recommends -y + sudo + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + COPY compute_ctl-resize-swap /etc/sudoers.d/compute_ctl-resize-swap + + COPY cgconfig.conf /etc/cgconfig.conf + + RUN set -e + && chmod 0644 /etc/cgconfig.conf + + COPY --from=libcgroup-builder /libcgroup-install/bin/* /usr/bin/ + COPY --from=libcgroup-builder /libcgroup-install/lib/* /usr/lib/ + COPY --from=libcgroup-builder /libcgroup-install/sbin/* /usr/sbin/ diff --git a/libs/pageserver_api/src/config.rs b/libs/pageserver_api/src/config.rs index 61e32bc9ab..95310fdbac 100644 --- a/libs/pageserver_api/src/config.rs +++ b/libs/pageserver_api/src/config.rs @@ -104,9 +104,6 @@ pub struct ConfigToml { pub image_compression: ImageCompressionAlgorithm, pub ephemeral_bytes_per_memory_kb: usize, pub l0_flush: Option, - #[serde(skip_serializing)] - // TODO(https://github.com/neondatabase/neon/issues/8184): remove after this field is removed from all pageserver.toml's - pub compact_level0_phase1_value_access: serde::de::IgnoredAny, pub virtual_file_direct_io: crate::models::virtual_file::DirectIoMode, pub io_buffer_alignment: usize, } @@ -384,7 +381,6 @@ impl Default for ConfigToml { image_compression: (DEFAULT_IMAGE_COMPRESSION), ephemeral_bytes_per_memory_kb: (DEFAULT_EPHEMERAL_BYTES_PER_MEMORY_KB), l0_flush: None, - compact_level0_phase1_value_access: Default::default(), virtual_file_direct_io: crate::models::virtual_file::DirectIoMode::default(), io_buffer_alignment: DEFAULT_IO_BUFFER_ALIGNMENT, diff --git a/pageserver/benches/bench_walredo.rs b/pageserver/benches/bench_walredo.rs index edc09d0bf2..45936cb3fa 100644 --- a/pageserver/benches/bench_walredo.rs +++ b/pageserver/benches/bench_walredo.rs @@ -1,7 +1,7 @@ //! Quantify a single walredo manager's throughput under N concurrent callers. //! //! The benchmark implementation ([`bench_impl`]) is parametrized by -//! - `redo_work` => [`Request::short_request`] or [`Request::medium_request`] +//! - `redo_work` => an async closure that takes a `PostgresRedoManager` and performs one redo //! - `n_redos` => number of times the benchmark shall execute the `redo_work` //! - `nclients` => number of clients (more on this shortly). //! @@ -10,7 +10,7 @@ //! Each task executes the `redo_work` `n_redos/nclients` times. //! //! We exercise the following combinations: -//! - `redo_work = short / medium`` +//! - `redo_work = ping / short / medium` //! - `nclients = [1, 2, 4, 8, 16, 32, 64, 128]` //! //! We let `criterion` determine the `n_redos` using `iter_custom`. @@ -27,33 +27,43 @@ //! //! # Reference Numbers //! -//! 2024-04-15 on i3en.3xlarge +//! 2024-09-18 on im4gn.2xlarge //! //! ```text -//! short/1 time: [24.584 µs 24.737 µs 24.922 µs] -//! short/2 time: [33.479 µs 33.660 µs 33.888 µs] -//! short/4 time: [42.713 µs 43.046 µs 43.440 µs] -//! short/8 time: [71.814 µs 72.478 µs 73.240 µs] -//! short/16 time: [132.73 µs 134.45 µs 136.22 µs] -//! short/32 time: [258.31 µs 260.73 µs 263.27 µs] -//! short/64 time: [511.61 µs 514.44 µs 517.51 µs] -//! short/128 time: [992.64 µs 998.23 µs 1.0042 ms] -//! medium/1 time: [110.11 µs 110.50 µs 110.96 µs] -//! medium/2 time: [153.06 µs 153.85 µs 154.99 µs] -//! medium/4 time: [317.51 µs 319.92 µs 322.85 µs] -//! medium/8 time: [638.30 µs 644.68 µs 652.12 µs] -//! medium/16 time: [1.2651 ms 1.2773 ms 1.2914 ms] -//! medium/32 time: [2.5117 ms 2.5410 ms 2.5720 ms] -//! medium/64 time: [4.8088 ms 4.8555 ms 4.9047 ms] -//!
medium/128 time: [8.8311 ms 8.9849 ms 9.1263 ms] +//! ping/1 time: [21.789 µs 21.918 µs 22.078 µs] +//! ping/2 time: [27.686 µs 27.812 µs 27.970 µs] +//! ping/4 time: [35.468 µs 35.671 µs 35.926 µs] +//! ping/8 time: [59.682 µs 59.987 µs 60.363 µs] +//! ping/16 time: [101.79 µs 102.37 µs 103.08 µs] +//! ping/32 time: [184.18 µs 185.15 µs 186.36 µs] +//! ping/64 time: [349.86 µs 351.45 µs 353.47 µs] +//! ping/128 time: [684.53 µs 687.98 µs 692.17 µs] +//! short/1 time: [31.833 µs 32.126 µs 32.428 µs] +//! short/2 time: [35.558 µs 35.756 µs 35.992 µs] +//! short/4 time: [44.850 µs 45.138 µs 45.484 µs] +//! short/8 time: [65.985 µs 66.379 µs 66.853 µs] +//! short/16 time: [127.06 µs 127.90 µs 128.87 µs] +//! short/32 time: [252.98 µs 254.70 µs 256.73 µs] +//! short/64 time: [497.13 µs 499.86 µs 503.26 µs] +//! short/128 time: [987.46 µs 993.45 µs 1.0004 ms] +//! medium/1 time: [137.91 µs 138.55 µs 139.35 µs] +//! medium/2 time: [192.00 µs 192.91 µs 194.07 µs] +//! medium/4 time: [389.62 µs 391.55 µs 394.01 µs] +//! medium/8 time: [776.80 µs 780.33 µs 784.77 µs] +//! medium/16 time: [1.5323 ms 1.5383 ms 1.5459 ms] +//! medium/32 time: [3.0120 ms 3.0226 ms 3.0350 ms] +//! medium/64 time: [5.7405 ms 5.7787 ms 5.8166 ms] +//! medium/128 time: [10.412 ms 10.574 ms 10.718 ms] //! ``` use anyhow::Context; use bytes::{Buf, Bytes}; use criterion::{BenchmarkId, Criterion}; +use once_cell::sync::Lazy; use pageserver::{config::PageServerConf, walrecord::NeonWalRecord, walredo::PostgresRedoManager}; use pageserver_api::{key::Key, shard::TenantShardId}; use std::{ + future::Future, sync::Arc, time::{Duration, Instant}, }; @@ -61,40 +71,59 @@ use tokio::{sync::Barrier, task::JoinSet}; use utils::{id::TenantId, lsn::Lsn}; fn bench(c: &mut Criterion) { - { - let nclients = [1, 2, 4, 8, 16, 32, 64, 128]; - for nclients in nclients { - let mut group = c.benchmark_group("short"); - group.bench_with_input( - BenchmarkId::from_parameter(nclients), - &nclients, - |b, nclients| { - let redo_work = Arc::new(Request::short_input()); - b.iter_custom(|iters| bench_impl(Arc::clone(&redo_work), iters, *nclients)); - }, - ); - } - } - { - let nclients = [1, 2, 4, 8, 16, 32, 64, 128]; - for nclients in nclients { - let mut group = c.benchmark_group("medium"); - group.bench_with_input( - BenchmarkId::from_parameter(nclients), - &nclients, - |b, nclients| { - let redo_work = Arc::new(Request::medium_input()); - b.iter_custom(|iters| bench_impl(Arc::clone(&redo_work), iters, *nclients)); - }, - ); - } + macro_rules! 
bench_group { + ($name:expr, $redo_work:expr) => {{ + let name: &str = $name; + let nclients = [1, 2, 4, 8, 16, 32, 64, 128]; + for nclients in nclients { + let mut group = c.benchmark_group(name); + group.bench_with_input( + BenchmarkId::from_parameter(nclients), + &nclients, + |b, nclients| { + b.iter_custom(|iters| bench_impl($redo_work, iters, *nclients)); + }, + ); + } + }}; } + // + // benchmark the protocol implementation + // + let pg_version = 14; + bench_group!( + "ping", + Arc::new(move |mgr: Arc| async move { + let _: () = mgr.ping(pg_version).await.unwrap(); + }) + ); + // + // benchmarks with actual record redo + // + let make_redo_work = |req: &'static Request| { + Arc::new(move |mgr: Arc| async move { + let page = req.execute(&mgr).await.unwrap(); + assert_eq!(page.remaining(), 8192); + }) + }; + bench_group!("short", { + static REQUEST: Lazy = Lazy::new(Request::short_input); + make_redo_work(&REQUEST) + }); + bench_group!("medium", { + static REQUEST: Lazy = Lazy::new(Request::medium_input); + make_redo_work(&REQUEST) + }); } criterion::criterion_group!(benches, bench); criterion::criterion_main!(benches); // Returns the sum of each client's wall-clock time spent executing their share of the n_redos. -fn bench_impl(redo_work: Arc, n_redos: u64, nclients: u64) -> Duration { +fn bench_impl(redo_work: Arc, n_redos: u64, nclients: u64) -> Duration +where + F: Fn(Arc) -> Fut + Send + Sync + 'static, + Fut: Future + Send + 'static, +{ let repo_dir = camino_tempfile::tempdir_in(env!("CARGO_TARGET_TMPDIR")).unwrap(); let conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf()); @@ -135,17 +164,20 @@ fn bench_impl(redo_work: Arc, n_redos: u64, nclients: u64) -> Duration }) } -async fn client( +async fn client( mgr: Arc, start: Arc, - redo_work: Arc, + redo_work: Arc, n_redos: u64, -) -> Duration { +) -> Duration +where + F: Fn(Arc) -> Fut + Send + Sync + 'static, + Fut: Future + Send + 'static, +{ start.wait().await; let start = Instant::now(); for _ in 0..n_redos { - let page = redo_work.execute(&mgr).await.unwrap(); - assert_eq!(page.remaining(), 8192); + redo_work(Arc::clone(&mgr)).await; // The real pageserver will rarely if ever do 2 walredos in a row without // yielding to the executor. tokio::task::yield_now().await; diff --git a/pageserver/client/src/mgmt_api.rs b/pageserver/client/src/mgmt_api.rs index a68f45a6d9..2d95ac42e6 100644 --- a/pageserver/client/src/mgmt_api.rs +++ b/pageserver/client/src/mgmt_api.rs @@ -432,7 +432,7 @@ impl Client { self.mgmt_api_endpoint ); - self.request(Method::POST, &uri, req) + self.request(Method::PUT, &uri, req) .await? .json() .await diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs index 8567c6aa52..e15f1c791b 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -324,7 +324,6 @@ impl PageServerConf { max_vectored_read_bytes, image_compression, ephemeral_bytes_per_memory_kb, - compact_level0_phase1_value_access: _, l0_flush, virtual_file_direct_io, concurrent_tenant_warmup, @@ -535,16 +534,6 @@ mod tests { .expect("parse_and_validate"); } - #[test] - fn test_compactl0_phase1_access_mode_is_ignored_silently() { - let input = indoc::indoc! 
{r#" - [compact_level0_phase1_value_access] - mode = "streaming-kmerge" - validate = "key-lsn-value" - "#}; - toml_edit::de::from_str::(input).unwrap(); - } - /// If there's a typo in the pageserver config, we'd rather catch that typo /// and fail pageserver startup than silently ignoring the typo, leaving whoever /// made it in the believe that their config change is effective. diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index d645f3b7b6..6a10d4fb1c 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -2955,7 +2955,7 @@ pub fn make_router( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/preserve_initdb_archive", |r| api_handler(r, timeline_preserve_initdb_handler), ) - .post( + .put( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/archival_config", |r| api_handler(r, timeline_archival_config_handler), ) diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index 078d12f934..162e8d1836 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -1383,7 +1383,7 @@ impl SmgrQueryTimePerTimeline { &'a self, op: SmgrQueryType, ctx: &'c RequestContext, - ) -> Option { + ) -> Option { let start = Instant::now(); self.global_started[op as usize].inc(); @@ -1534,7 +1534,7 @@ impl BasebackupQueryTime { pub(crate) fn start_recording<'c: 'a, 'a>( &'a self, ctx: &'c RequestContext, - ) -> BasebackupQueryTimeOngoingRecording<'_, '_> { + ) -> BasebackupQueryTimeOngoingRecording<'a, 'a> { let start = Instant::now(); match ctx.micros_spent_throttled.open() { Ok(()) => (), diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 5f8766ca2c..7aa313f031 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -840,6 +840,36 @@ impl Timeline { Ok(total_size * BLCKSZ as u64) } + /// Get a KeySpace that covers all the Keys that are in use at AND below the given LSN. This is only used + /// for gc-compaction. + /// + /// gc-compaction cannot use the same `collect_keyspace` function as the legacy compaction because it + /// processes data at multiple LSNs and needs to be aware of the fact that some key ranges might need to + /// be kept only for a specific range of LSN. + /// + /// Consider the case that the user created branches at LSN 10 and 20, where the user created a table A at + /// LSN 10 and dropped that table at LSN 20. `collect_keyspace` at LSN 10 will return the key range + /// corresponding to that table, while LSN 20 won't. The keyspace info at a single LSN is not enough to + /// determine which keys to retain/drop for gc-compaction. + /// + /// For now, it only drops AUX-v1 keys. But in the future, the function will be extended to return the keyspace + /// to be retained for each of the branch LSN. + /// + /// The return value is (dense keyspace, sparse keyspace). + pub(crate) async fn collect_gc_compaction_keyspace( + &self, + ) -> Result<(KeySpace, SparseKeySpace), CollectKeySpaceError> { + let metadata_key_begin = Key::metadata_key_range().start; + let aux_v1_key = AUX_FILES_KEY; + let dense_keyspace = KeySpace { + ranges: vec![Key::MIN..aux_v1_key, aux_v1_key.next()..metadata_key_begin], + }; + Ok(( + dense_keyspace, + SparseKeySpace(KeySpace::single(Key::metadata_key_range())), + )) + } + /// /// Get a KeySpace that covers all the Keys that are in use at the given LSN. 
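An aside on how the dense/sparse pair returned above is consumed: gc-compaction concatenates both range lists into one sorted set of "retain" filters and drops any key outside them, which is exactly what the new `FilterIterator` later in this diff does. A minimal sketch of that contract, using plain `u64` keys as a stand-in for the pageserver's `Key` type (names and ranges here are illustrative, not the real API):

```rust
use std::ops::Range;

// Combine a dense and a sparse keyspace into one sorted list of retain
// ranges, mirroring FilterIterator::create (minus the overlap check).
fn build_filters(dense: Vec<Range<u64>>, sparse: Vec<Range<u64>>) -> Vec<Range<u64>> {
    let mut filters = dense;
    filters.extend(sparse);
    filters.sort_by_key(|r| r.start);
    filters
}

fn is_retained(filters: &[Range<u64>], key: u64) -> bool {
    filters.iter().any(|r| r.contains(&key))
}

fn main() {
    // Mirror the shape of collect_gc_compaction_keyspace: the dense part is
    // everything below the metadata range except one dropped key (the AUX v1
    // key in the real code); the sparse part is the metadata range.
    let aux_v1 = 500u64;
    let dense = vec![0..aux_v1, (aux_v1 + 1)..1_000];
    let sparse = vec![1_000..2_000];
    let filters = build_filters(dense, sparse);
    assert!(is_retained(&filters, 42));
    assert!(!is_retained(&filters, aux_v1)); // the AUX v1 key is dropped
    assert!(is_retained(&filters, 1_500));
}
```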
/// Anything that's not listed maybe removed from the underlying storage (from diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index e328cd2044..5ed63734f4 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -18,7 +18,6 @@ use camino::Utf8Path; use camino::Utf8PathBuf; use enumset::EnumSet; use futures::stream::FuturesUnordered; -use futures::FutureExt; use futures::StreamExt; use pageserver_api::models; use pageserver_api::models::AuxFilePolicy; @@ -34,6 +33,7 @@ use remote_storage::GenericRemoteStorage; use remote_storage::TimeoutOrCancel; use std::collections::BTreeMap; use std::fmt; +use std::future::Future; use std::sync::Weak; use std::time::SystemTime; use storage_broker::BrokerClientChannel; @@ -1031,13 +1031,9 @@ impl Tenant { } Ok(TenantPreload { - timelines: Self::load_timeline_metadata( - self, - remote_timeline_ids, - remote_storage, - cancel, - ) - .await?, + timelines: self + .load_timelines_metadata(remote_timeline_ids, remote_storage, cancel) + .await?, }) } @@ -1303,7 +1299,7 @@ impl Tenant { .await } - async fn load_timeline_metadata( + async fn load_timelines_metadata( self: &Arc, timeline_ids: HashSet, remote_storage: &GenericRemoteStorage, @@ -1311,33 +1307,10 @@ impl Tenant { ) -> anyhow::Result> { let mut part_downloads = JoinSet::new(); for timeline_id in timeline_ids { - let client = RemoteTimelineClient::new( - remote_storage.clone(), - self.deletion_queue_client.clone(), - self.conf, - self.tenant_shard_id, - timeline_id, - self.generation, - ); let cancel_clone = cancel.clone(); part_downloads.spawn( - async move { - debug!("starting index part download"); - - let index_part = client.download_index_file(&cancel_clone).await; - - debug!("finished index part download"); - - Result::<_, anyhow::Error>::Ok(TimelinePreload { - client, - timeline_id, - index_part, - }) - } - .map(move |res| { - res.with_context(|| format!("download index part for timeline {timeline_id}")) - }) - .instrument(info_span!("download_index_part", %timeline_id)), + self.load_timeline_metadata(timeline_id, remote_storage.clone(), cancel_clone) + .instrument(info_span!("download_index_part", %timeline_id)), ); } @@ -1348,8 +1321,7 @@ impl Tenant { next = part_downloads.join_next() => { match next { Some(result) => { - let preload_result = result.context("join preload task")?; - let preload = preload_result?; + let preload = result.context("join preload task")?; timeline_preloads.insert(preload.timeline_id, preload); }, None => { @@ -1366,6 +1338,36 @@ impl Tenant { Ok(timeline_preloads) } + fn load_timeline_metadata( + self: &Arc, + timeline_id: TimelineId, + remote_storage: GenericRemoteStorage, + cancel: CancellationToken, + ) -> impl Future { + let client = RemoteTimelineClient::new( + remote_storage.clone(), + self.deletion_queue_client.clone(), + self.conf, + self.tenant_shard_id, + timeline_id, + self.generation, + ); + async move { + debug_assert_current_span_has_tenant_and_timeline_id(); + debug!("starting index part download"); + + let index_part = client.download_index_file(&cancel).await; + + debug!("finished index part download"); + + TimelinePreload { + client, + timeline_id, + index_part, + } + } + } + pub(crate) async fn apply_timeline_archival_config( &self, timeline_id: TimelineId, @@ -3625,7 +3627,7 @@ impl Tenant { start_lsn: Lsn, ancestor: Option>, last_aux_file_policy: Option, - ) -> anyhow::Result { + ) -> anyhow::Result> { let tenant_shard_id = self.tenant_shard_id; let resources = self.build_timeline_resources(new_timeline_id); diff 
--git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs
index cd252aa371..99bd0ece57 100644
--- a/pageserver/src/tenant/storage_layer.rs
+++ b/pageserver/src/tenant/storage_layer.rs
@@ -1,13 +1,13 @@
 //! Common traits and structs for layers

 pub mod delta_layer;
+pub mod filter_iterator;
 pub mod image_layer;
 pub mod inmemory_layer;
 pub(crate) mod layer;
 mod layer_desc;
 mod layer_name;
 pub mod merge_iterator;
-
 pub mod split_writer;

 use crate::context::{AccessStatsBehavior, RequestContext};
diff --git a/pageserver/src/tenant/storage_layer/filter_iterator.rs b/pageserver/src/tenant/storage_layer/filter_iterator.rs
new file mode 100644
index 0000000000..f45dd4b801
--- /dev/null
+++ b/pageserver/src/tenant/storage_layer/filter_iterator.rs
@@ -0,0 +1,205 @@
+use std::ops::Range;
+
+use anyhow::bail;
+use pageserver_api::{
+    key::Key,
+    keyspace::{KeySpace, SparseKeySpace},
+};
+use utils::lsn::Lsn;
+
+use crate::repository::Value;
+
+use super::merge_iterator::MergeIterator;
+
+/// A filter iterator over merge iterators (and can be easily extended to other types of iterators).
+///
+/// The iterator will skip any keys not included in the keyspace filter. In other words, the keyspace filter contains the keys
+/// to be retained.
+pub struct FilterIterator<'a> {
+    inner: MergeIterator<'a>,
+    retain_key_filters: Vec<Range<Key>>,
+    current_filter_idx: usize,
+}
+
+impl<'a> FilterIterator<'a> {
+    pub fn create(
+        inner: MergeIterator<'a>,
+        dense_keyspace: KeySpace,
+        sparse_keyspace: SparseKeySpace,
+    ) -> anyhow::Result<Self> {
+        let mut retain_key_filters = Vec::new();
+        retain_key_filters.extend(dense_keyspace.ranges);
+        retain_key_filters.extend(sparse_keyspace.0.ranges);
+        retain_key_filters.sort_by(|a, b| a.start.cmp(&b.start));
+        // Verify key filters are non-overlapping and sorted
+        for window in retain_key_filters.windows(2) {
+            if window[0].end > window[1].start {
+                bail!(
+                    "Key filters are overlapping: {:?} and {:?}",
+                    window[0],
+                    window[1]
+                );
+            }
+        }
+        Ok(Self {
+            inner,
+            retain_key_filters,
+            current_filter_idx: 0,
+        })
+    }
+
+    pub async fn next(&mut self) -> anyhow::Result<Option<(Key, Lsn, Value)>> {
+        while let Some(item) = self.inner.next().await? {
+            while self.current_filter_idx < self.retain_key_filters.len()
+                && item.0 >= self.retain_key_filters[self.current_filter_idx].end
+            {
+                // [filter region]    [filter region]    [filter region]
+                //                  ^ item
+                //  ^ current filter
+                self.current_filter_idx += 1;
+                // [filter region]    [filter region]    [filter region]
+                //                  ^ item
+                //                     ^ current filter
+            }
+            if self.current_filter_idx >= self.retain_key_filters.len() {
+                // We already exhausted all filters, so we should return now
+                // [filter region]    [filter region]    [filter region]
+                //                                                        ^ item
+                //                                                        ^ current filter (nothing)
+                return Ok(None);
+            }
+            if self.retain_key_filters[self.current_filter_idx].contains(&item.0) {
+                // [filter region]    [filter region]    [filter region]
+                //                                             ^ item
+                //                                        ^ current filter
+                return Ok(Some(item));
+            }
+            // If the key is not contained in the key-retaining filters, continue to the next item.
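The skip logic spelled out in these inline diagrams boils down to a classic two-pointer sweep: because the merge iterator yields keys in ascending order and the filters are sorted and non-overlapping, the filter cursor only ever moves forward, so the whole pass is linear. A self-contained sketch of the same control flow over plain `u64` keys (synchronous and simplified, not the async pageserver API):

```rust
use std::ops::Range;

/// Keep only the keys of an ascending stream that fall inside one of the
/// sorted, non-overlapping `filters` ranges; mirrors FilterIterator::next.
fn filter_sorted(items: impl Iterator<Item = u64>, filters: &[Range<u64>]) -> Vec<u64> {
    let mut idx = 0; // current_filter_idx in the real code
    let mut out = Vec::new();
    for key in items {
        // Advance past filters that end at or before this key.
        while idx < filters.len() && key >= filters[idx].end {
            idx += 1;
        }
        if idx >= filters.len() {
            break; // all filters exhausted; no later key can match
        }
        if filters[idx].contains(&key) {
            out.push(key);
        }
        // else: key sits in the gap before filters[idx]; drop it
    }
    out
}

fn main() {
    let filters = [5..10, 20..30];
    let kept = filter_sorted(0..40, &filters);
    assert_eq!(kept, (5..10).chain(20..30).collect::<Vec<_>>());
}
```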
+ // [filter region] [filter region] [filter region] + // ^ item + // ^ current filter + } + Ok(None) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use itertools::Itertools; + use pageserver_api::key::Key; + use utils::lsn::Lsn; + + use crate::{ + tenant::{ + harness::{TenantHarness, TIMELINE_ID}, + storage_layer::delta_layer::test::produce_delta_layer, + }, + DEFAULT_PG_VERSION, + }; + + async fn assert_filter_iter_equal( + filter_iter: &mut FilterIterator<'_>, + expect: &[(Key, Lsn, Value)], + ) { + let mut expect_iter = expect.iter(); + loop { + let o1 = filter_iter.next().await.unwrap(); + let o2 = expect_iter.next(); + assert_eq!(o1.is_some(), o2.is_some()); + if o1.is_none() && o2.is_none() { + break; + } + let (k1, l1, v1) = o1.unwrap(); + let (k2, l2, v2) = o2.unwrap(); + assert_eq!(&k1, k2); + assert_eq!(l1, *l2); + assert_eq!(&v1, v2); + } + } + + #[tokio::test] + async fn filter_keyspace_iterator() { + use crate::repository::Value; + use bytes::Bytes; + + let harness = TenantHarness::create("filter_iterator_filter_keyspace_iterator") + .await + .unwrap(); + let (tenant, ctx) = harness.load().await; + + let tline = tenant + .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .await + .unwrap(); + + fn get_key(id: u32) -> Key { + let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap(); + key.field6 = id; + key + } + const N: usize = 100; + let test_deltas1 = (0..N) + .map(|idx| { + ( + get_key(idx as u32), + Lsn(0x20 * ((idx as u64) % 10 + 1)), + Value::Image(Bytes::from(format!("img{idx:05}"))), + ) + }) + .collect_vec(); + let resident_layer_1 = produce_delta_layer(&tenant, &tline, test_deltas1.clone(), &ctx) + .await + .unwrap(); + + let merge_iter = MergeIterator::create( + &[resident_layer_1.get_as_delta(&ctx).await.unwrap()], + &[], + &ctx, + ); + + let mut filter_iter = FilterIterator::create( + merge_iter, + KeySpace { + ranges: vec![ + get_key(5)..get_key(10), + get_key(20)..get_key(30), + get_key(90)..get_key(110), + get_key(1000)..get_key(2000), + ], + }, + SparseKeySpace(KeySpace::default()), + ) + .unwrap(); + let mut result = Vec::new(); + result.extend(test_deltas1[5..10].iter().cloned()); + result.extend(test_deltas1[20..30].iter().cloned()); + result.extend(test_deltas1[90..100].iter().cloned()); + assert_filter_iter_equal(&mut filter_iter, &result).await; + + let merge_iter = MergeIterator::create( + &[resident_layer_1.get_as_delta(&ctx).await.unwrap()], + &[], + &ctx, + ); + + let mut filter_iter = FilterIterator::create( + merge_iter, + KeySpace { + ranges: vec![ + get_key(0)..get_key(10), + get_key(20)..get_key(30), + get_key(90)..get_key(95), + ], + }, + SparseKeySpace(KeySpace::default()), + ) + .unwrap(); + let mut result = Vec::new(); + result.extend(test_deltas1[0..10].iter().cloned()); + result.extend(test_deltas1[20..30].iter().cloned()); + result.extend(test_deltas1[90..95].iter().cloned()); + assert_filter_iter_equal(&mut filter_iter, &result).await; + } +} diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index d1567b6b39..6edc28a11b 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -31,6 +31,7 @@ use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder} use crate::page_cache; use crate::tenant::checks::check_valid_layermap; use crate::tenant::remote_timeline_client::WaitCompletionError; +use crate::tenant::storage_layer::filter_iterator::FilterIterator; use 
crate::tenant::storage_layer::merge_iterator::MergeIterator; use crate::tenant::storage_layer::split_writer::{ SplitDeltaLayerWriter, SplitImageLayerWriter, SplitWriterResult, @@ -1772,6 +1773,7 @@ impl Timeline { gc_cutoff, lowest_retain_lsn ); + // Step 1: (In the future) construct a k-merge iterator over all layers. For now, simply collect all keys + LSNs. // Also, verify if the layer map can be split by drawing a horizontal line at every LSN start/end split point. let mut lsn_split_point = BTreeSet::new(); // TODO: use a better data structure (range tree / range set?) @@ -1820,7 +1822,12 @@ impl Timeline { image_layers.push(layer); } } - let mut merge_iter = MergeIterator::create(&delta_layers, &image_layers, ctx); + let (dense_ks, sparse_ks) = self.collect_gc_compaction_keyspace().await?; + let mut merge_iter = FilterIterator::create( + MergeIterator::create(&delta_layers, &image_layers, ctx), + dense_ks, + sparse_ks, + )?; // Step 2: Produce images+deltas. TODO: ensure newly-produced delta does not overlap with other deltas. // Data of the same key. let mut accumulated_values = Vec::new(); diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index 0fe7def8b0..a1c9fc5651 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -205,6 +205,22 @@ impl PostgresRedoManager { } } + /// Do a ping request-response roundtrip. + /// + /// Not used in production, but by Rust benchmarks. + /// + /// # Cancel-Safety + /// + /// This method is cancellation-safe. + pub async fn ping(&self, pg_version: u32) -> Result<(), Error> { + self.do_with_walredo_process(pg_version, |proc| async move { + proc.ping(Duration::from_secs(1)) + .await + .map_err(Error::Other) + }) + .await + } + pub fn status(&self) -> WalRedoManagerStatus { WalRedoManagerStatus { last_redo_at: { @@ -297,6 +313,9 @@ impl PostgresRedoManager { } } + /// # Cancel-Safety + /// + /// This method is cancel-safe iff `closure` is cancel-safe. async fn do_with_walredo_process< F: FnOnce(Arc) -> Fut, Fut: Future>, @@ -537,6 +556,17 @@ mod tests { use tracing::Instrument; use utils::{id::TenantId, lsn::Lsn}; + #[tokio::test] + async fn test_ping() { + let h = RedoHarness::new().unwrap(); + + h.manager + .ping(14) + .instrument(h.span()) + .await + .expect("ping should work"); + } + #[tokio::test] async fn short_v14_redo() { let expected = std::fs::read("test_data/short_v14_redo.page").unwrap(); diff --git a/pageserver/src/walredo/process.rs b/pageserver/src/walredo/process.rs index 9140d4f6aa..f3197e68b5 100644 --- a/pageserver/src/walredo/process.rs +++ b/pageserver/src/walredo/process.rs @@ -6,6 +6,7 @@ use self::no_leak_child::NoLeakChild; use crate::{ config::PageServerConf, metrics::{WalRedoKillCause, WAL_REDO_PROCESS_COUNTERS, WAL_REDO_RECORD_COUNTER}, + page_cache::PAGE_SZ, span::debug_assert_current_span_has_tenant_id, walrecord::NeonWalRecord, }; @@ -237,6 +238,26 @@ impl WalRedoProcess { res } + /// Do a ping request-response roundtrip. + /// + /// Not used in production, but by Rust benchmarks. 
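Before the implementation below: judging from `build_ping_msg` further down, the ping frame is the smallest message in this pipe protocol, a one-byte tag plus a big-endian `u32` length that covers only the length word (there is no payload), and the redo process answers with one full `PAGE_SZ` (8192-byte) page. A std-only sketch of the framing; the actual code uses `bytes::BufMut` instead:

```rust
/// Build the 5-byte ping frame: tag byte 'H' followed by the length field.
/// Matches build_ping_msg in protocol.rs: put_u8(b'H') then put_u32(4).
fn build_ping_msg(buf: &mut Vec<u8>) {
    buf.push(b'H');
    buf.extend_from_slice(&4u32.to_be_bytes());
}

fn main() {
    let mut buf = Vec::with_capacity(5);
    build_ping_msg(&mut buf);
    assert_eq!(buf, [b'H', 0, 0, 0, 4]);
    // The response is validated by length only: anything that is not exactly
    // PAGE_SZ (8192) bytes is treated as a protocol error by ping().
}
```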
+ pub(crate) async fn ping(&self, timeout: Duration) -> anyhow::Result<()> { + let mut writebuf: Vec = Vec::with_capacity(4); + protocol::build_ping_msg(&mut writebuf); + let Ok(res) = tokio::time::timeout(timeout, self.apply_wal_records0(&writebuf)).await + else { + anyhow::bail!("WAL redo ping timed out"); + }; + let response = res?; + if response.len() != PAGE_SZ { + anyhow::bail!( + "WAL redo ping response should respond with page-sized response: {}", + response.len() + ); + } + Ok(()) + } + /// # Cancel-Safety /// /// When not polled to completion (e.g. because in `tokio::select!` another diff --git a/pageserver/src/walredo/process/protocol.rs b/pageserver/src/walredo/process/protocol.rs index b703344cc8..de3ca8741b 100644 --- a/pageserver/src/walredo/process/protocol.rs +++ b/pageserver/src/walredo/process/protocol.rs @@ -55,3 +55,8 @@ pub(crate) fn build_get_page_msg(tag: BufferTag, buf: &mut Vec) { tag.ser_into(buf) .expect("serialize BufferTag should always succeed"); } + +pub(crate) fn build_ping_msg(buf: &mut Vec) { + buf.put_u8(b'H'); + buf.put_u32(4); +} diff --git a/patches/cloud_regress_pg16.patch b/patches/cloud_regress_pg16.patch new file mode 100644 index 0000000000..d15d0cffeb --- /dev/null +++ b/patches/cloud_regress_pg16.patch @@ -0,0 +1,3949 @@ +diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out +index 0c24f6afe4..dd808ac2b4 100644 +--- a/src/test/regress/expected/aggregates.out ++++ b/src/test/regress/expected/aggregates.out +@@ -11,7 +11,8 @@ CREATE TABLE aggtest ( + b float4 + ); + \set filename :abs_srcdir '/data/agg.data' +-COPY aggtest FROM :'filename'; ++\set command '\\copy aggtest FROM ' :'filename'; ++:command + ANALYZE aggtest; + SELECT avg(four) AS avg_1 FROM onek; + avg_1 +diff --git a/src/test/regress/expected/alter_generic.out b/src/test/regress/expected/alter_generic.out +index ae54cb254f..888e2ee8bc 100644 +--- a/src/test/regress/expected/alter_generic.out ++++ b/src/test/regress/expected/alter_generic.out +@@ -15,9 +15,9 @@ DROP ROLE IF EXISTS regress_alter_generic_user1; + DROP ROLE IF EXISTS regress_alter_generic_user2; + DROP ROLE IF EXISTS regress_alter_generic_user3; + RESET client_min_messages; +-CREATE USER regress_alter_generic_user3; +-CREATE USER regress_alter_generic_user2; +-CREATE USER regress_alter_generic_user1 IN ROLE regress_alter_generic_user3; ++CREATE USER regress_alter_generic_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_alter_generic_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_alter_generic_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE regress_alter_generic_user3; + CREATE SCHEMA alt_nsp1; + CREATE SCHEMA alt_nsp2; + GRANT ALL ON SCHEMA alt_nsp1, alt_nsp2 TO public; +@@ -370,7 +370,7 @@ ERROR: STORAGE cannot be specified in ALTER OPERATOR FAMILY + DROP OPERATOR FAMILY alt_opf4 USING btree; + -- Should fail. Need to be SUPERUSER to do ALTER OPERATOR FAMILY .. ADD / DROP + BEGIN TRANSACTION; +-CREATE ROLE regress_alter_generic_user5 NOSUPERUSER; ++CREATE ROLE regress_alter_generic_user5 PASSWORD NEON_PASSWORD_PLACEHOLDER NOSUPERUSER; + CREATE OPERATOR FAMILY alt_opf5 USING btree; + SET ROLE regress_alter_generic_user5; + ALTER OPERATOR FAMILY alt_opf5 USING btree ADD OPERATOR 1 < (int4, int2), FUNCTION 1 btint42cmp(int4, int2); +@@ -382,7 +382,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio + ROLLBACK; + -- Should fail. Need rights to namespace for ALTER OPERATOR FAMILY .. 
ADD / DROP + BEGIN TRANSACTION; +-CREATE ROLE regress_alter_generic_user6; ++CREATE ROLE regress_alter_generic_user6 PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE SCHEMA alt_nsp6; + REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_generic_user6; + CREATE OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree; +diff --git a/src/test/regress/expected/alter_operator.out b/src/test/regress/expected/alter_operator.out +index 71bd484282..066ea4ec0d 100644 +--- a/src/test/regress/expected/alter_operator.out ++++ b/src/test/regress/expected/alter_operator.out +@@ -127,7 +127,7 @@ ERROR: operator attribute "Restrict" not recognized + -- + -- Test permission check. Must be owner to ALTER OPERATOR. + -- +-CREATE USER regress_alter_op_user; ++CREATE USER regress_alter_op_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION regress_alter_op_user; + ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); + ERROR: must be owner of operator === +diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out +index 0e439a6488..393f316c3e 100644 +--- a/src/test/regress/expected/alter_table.out ++++ b/src/test/regress/expected/alter_table.out +@@ -5,7 +5,7 @@ + SET client_min_messages TO 'warning'; + DROP ROLE IF EXISTS regress_alter_table_user1; + RESET client_min_messages; +-CREATE USER regress_alter_table_user1; ++CREATE USER regress_alter_table_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- + -- add attribute + -- +@@ -3896,8 +3896,8 @@ DROP TABLE fail_part; + ALTER TABLE list_parted ATTACH PARTITION nonexistent FOR VALUES IN (1); + ERROR: relation "nonexistent" does not exist + -- check ownership of the source table +-CREATE ROLE regress_test_me; +-CREATE ROLE regress_test_not_me; ++CREATE ROLE regress_test_me PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_test_not_me PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE not_owned_by_me (LIKE list_parted); + ALTER TABLE not_owned_by_me OWNER TO regress_test_not_me; + SET SESSION AUTHORIZATION regress_test_me; +diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out +index 57a283dc59..9672d526b4 100644 +--- a/src/test/regress/expected/arrays.out ++++ b/src/test/regress/expected/arrays.out +@@ -18,7 +18,8 @@ CREATE TABLE array_op_test ( + t text[] + ); + \set filename :abs_srcdir '/data/array.data' +-COPY array_op_test FROM :'filename'; ++\set command '\\copy array_op_test FROM ' :'filename'; ++:command + ANALYZE array_op_test; + -- + -- only the 'e' array is 0-based, the others are 1-based. 
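A note on the pattern repeated throughout this patch file: plain `COPY ... FROM :'filename'` runs on the server and reads the server's filesystem, which a managed endpoint does not expose, so every such call is rewritten into psql's client-side `\copy`. The `\set command ... :command` indirection is needed because `\copy`, unlike most meta-commands, does not interpolate `:'filename'` in its arguments; the `"FILE"`-to-`"PIPE"` change in the copy.out progress output further down is a direct consequence of copying through the client. A hypothetical Rust helper showing the mechanical rewrite (illustrative only, not part of this PR):

```rust
/// Rewrite a server-side `COPY <target> FROM :'filename';` line into the
/// two-line client-side form used throughout this patch.
fn rewrite_copy(line: &str) -> Option<String> {
    let rest = line.strip_prefix("COPY ")?;
    let (target, _) = rest.split_once(" FROM :'filename';")?;
    Some(format!(
        "\\set command '\\\\copy {target} FROM ' :'filename';\n:command"
    ))
}

fn main() {
    let out = rewrite_copy("COPY aggtest FROM :'filename';").unwrap();
    // Prints:
    //   \set command '\\copy aggtest FROM ' :'filename';
    //   :command
    println!("{out}");
}
```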
+diff --git a/src/test/regress/expected/btree_index.out b/src/test/regress/expected/btree_index.out +index 93ed5e8cc0..54bd7d535c 100644 +--- a/src/test/regress/expected/btree_index.out ++++ b/src/test/regress/expected/btree_index.out +@@ -20,13 +20,17 @@ CREATE TABLE bt_f8_heap ( + random int4 + ); + \set filename :abs_srcdir '/data/desc.data' +-COPY bt_i4_heap FROM :'filename'; ++\set command '\\copy bt_i4_heap FROM ' :'filename'; ++:command + \set filename :abs_srcdir '/data/hash.data' +-COPY bt_name_heap FROM :'filename'; ++\set command '\\copy bt_name_heap FROM ' :'filename'; ++:command + \set filename :abs_srcdir '/data/desc.data' +-COPY bt_txt_heap FROM :'filename'; ++\set command '\\copy bt_txt_heap FROM ' :'filename'; ++:command + \set filename :abs_srcdir '/data/hash.data' +-COPY bt_f8_heap FROM :'filename'; ++\set command '\\copy bt_f8_heap FROM ' :'filename'; ++:command + ANALYZE bt_i4_heap; + ANALYZE bt_name_heap; + ANALYZE bt_txt_heap; +diff --git a/src/test/regress/expected/cluster.out b/src/test/regress/expected/cluster.out +index 542c2e098c..0062d3024f 100644 +--- a/src/test/regress/expected/cluster.out ++++ b/src/test/regress/expected/cluster.out +@@ -308,7 +308,7 @@ WHERE pg_class.oid=indexrelid + -- Verify that toast tables are clusterable + CLUSTER pg_toast.pg_toast_826 USING pg_toast_826_index; + -- Verify that clustering all tables does in fact cluster the right ones +-CREATE USER regress_clstr_user; ++CREATE USER regress_clstr_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE clstr_1 (a INT PRIMARY KEY); + CREATE TABLE clstr_2 (a INT PRIMARY KEY); + CREATE TABLE clstr_3 (a INT PRIMARY KEY); +@@ -497,7 +497,7 @@ DROP TABLE clstrpart; + CREATE TABLE ptnowner(i int unique) PARTITION BY LIST (i); + CREATE INDEX ptnowner_i_idx ON ptnowner(i); + CREATE TABLE ptnowner1 PARTITION OF ptnowner FOR VALUES IN (1); +-CREATE ROLE regress_ptnowner; ++CREATE ROLE regress_ptnowner PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE ptnowner2 PARTITION OF ptnowner FOR VALUES IN (2); + ALTER TABLE ptnowner1 OWNER TO regress_ptnowner; + ALTER TABLE ptnowner OWNER TO regress_ptnowner; +diff --git a/src/test/regress/expected/collate.icu.utf8.out b/src/test/regress/expected/collate.icu.utf8.out +index 97bbe53b64..eac3d42a79 100644 +--- a/src/test/regress/expected/collate.icu.utf8.out ++++ b/src/test/regress/expected/collate.icu.utf8.out +@@ -1016,7 +1016,7 @@ select * from collate_test1 where b ilike 'ABC'; + + reset enable_seqscan; + -- schema manipulation commands +-CREATE ROLE regress_test_role; ++CREATE ROLE regress_test_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE SCHEMA test_schema; + -- We need to do this this way to cope with varying names for encodings: + SET client_min_messages TO WARNING; +diff --git a/src/test/regress/expected/constraints.out b/src/test/regress/expected/constraints.out +index cf0b80d616..e8e2a14a4a 100644 +--- a/src/test/regress/expected/constraints.out ++++ b/src/test/regress/expected/constraints.out +@@ -349,7 +349,8 @@ CREATE TABLE COPY_TBL (x INT, y TEXT, z INT, + CONSTRAINT COPY_CON + CHECK (x > 3 AND y <> 'check failed' AND x < 7 )); + \set filename :abs_srcdir '/data/constro.data' +-COPY COPY_TBL FROM :'filename'; ++\set command '\\copy COPY_TBL FROM ' :'filename'; ++:command + SELECT * FROM COPY_TBL; + x | y | z + ---+---------------+--- +@@ -358,7 +359,8 @@ SELECT * FROM COPY_TBL; + (2 rows) + + \set filename :abs_srcdir '/data/constrf.data' +-COPY COPY_TBL FROM :'filename'; ++\set command '\\copy COPY_TBL FROM ' :'filename'; ++:command + 
ERROR: new row for relation "copy_tbl" violates check constraint "copy_con" + DETAIL: Failing row contains (7, check failed, 6). + CONTEXT: COPY copy_tbl, line 2: "7 check failed 6" +@@ -799,7 +801,7 @@ DETAIL: Key (f1)=(3) conflicts with key (f1)=(3). + DROP TABLE deferred_excl; + -- Comments + -- Setup a low-level role to enforce non-superuser checks. +-CREATE ROLE regress_constraint_comments; ++CREATE ROLE regress_constraint_comments PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION regress_constraint_comments; + CREATE TABLE constraint_comments_tbl (a int CONSTRAINT the_constraint CHECK (a > 0)); + CREATE DOMAIN constraint_comments_dom AS int CONSTRAINT the_constraint CHECK (value > 0); +@@ -819,7 +821,7 @@ COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS NULL; + COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS NULL; + -- unauthorized user + RESET SESSION AUTHORIZATION; +-CREATE ROLE regress_constraint_comments_noaccess; ++CREATE ROLE regress_constraint_comments_noaccess PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION regress_constraint_comments_noaccess; + COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'no, the comment'; + ERROR: must be owner of relation constraint_comments_tbl +diff --git a/src/test/regress/expected/conversion.out b/src/test/regress/expected/conversion.out +index 442e7aff2b..525f732b03 100644 +--- a/src/test/regress/expected/conversion.out ++++ b/src/test/regress/expected/conversion.out +@@ -8,7 +8,7 @@ + CREATE FUNCTION test_enc_conversion(bytea, name, name, bool, validlen OUT int, result OUT bytea) + AS :'regresslib', 'test_enc_conversion' + LANGUAGE C STRICT; +-CREATE USER regress_conversion_user WITH NOCREATEDB NOCREATEROLE; ++CREATE USER regress_conversion_user WITH NOCREATEDB NOCREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION regress_conversion_user; + CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; + -- +diff --git a/src/test/regress/expected/copy.out b/src/test/regress/expected/copy.out +index b48365ec98..a6ef910055 100644 +--- a/src/test/regress/expected/copy.out ++++ b/src/test/regress/expected/copy.out +@@ -15,9 +15,11 @@ insert into copytest values('Unix',E'abc\ndef',2); + insert into copytest values('Mac',E'abc\rdef',3); + insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4); + \set filename :abs_builddir '/results/copytest.csv' +-copy copytest to :'filename' csv; ++\set command '\\copy copytest to ' :'filename' csv; ++:command + create temp table copytest2 (like copytest); +-copy copytest2 from :'filename' csv; ++\set command '\\copy copytest2 from ' :'filename' csv; ++:command + select * from copytest except select * from copytest2; + style | test | filler + -------+------+-------- +@@ -25,8 +27,10 @@ select * from copytest except select * from copytest2; + + truncate copytest2; + --- same test but with an escape char different from quote char +-copy copytest to :'filename' csv quote '''' escape E'\\'; +-copy copytest2 from :'filename' csv quote '''' escape E'\\'; ++\set command '\\copy copytest to ' :'filename' ' csv quote ' '\'\'\'\'' ' escape ' 'E\'' '\\\\\''; ++:command ++\set command '\\copy copytest2 from ' :'filename' ' csv quote ' '\'\'\'\'' ' escape ' 'E\'' '\\\\\''; ++:command + select * from copytest except select * from copytest2; + style | test | filler + -------+------+-------- +@@ -66,13 +70,16 @@ insert into parted_copytest select x,1,'One' from generate_series(1,1000) x; + insert into 
parted_copytest select x,2,'Two' from generate_series(1001,1010) x; + insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x; + \set filename :abs_builddir '/results/parted_copytest.csv' +-copy (select * from parted_copytest order by a) to :'filename'; ++\set command '\\copy (select * from parted_copytest order by a) to ' :'filename'; ++:command + truncate parted_copytest; +-copy parted_copytest from :'filename'; ++\set command '\\copy parted_copytest from ' :'filename'; ++:command + -- Ensure COPY FREEZE errors for partitioned tables. + begin; + truncate parted_copytest; +-copy parted_copytest from :'filename' (freeze); ++\set command '\\copy parted_copytest from ' :'filename' (freeze); ++:command + ERROR: cannot perform COPY FREEZE on a partitioned table + rollback; + select tableoid::regclass,count(*),sum(a) from parted_copytest +@@ -94,7 +101,8 @@ create trigger part_ins_trig + before insert on parted_copytest_a2 + for each row + execute procedure part_ins_func(); +-copy parted_copytest from :'filename'; ++\set command '\\copy parted_copytest from ' :'filename'; ++:command + select tableoid::regclass,count(*),sum(a) from parted_copytest + group by tableoid order by tableoid::regclass::name; + tableoid | count | sum +@@ -106,7 +114,8 @@ group by tableoid order by tableoid::regclass::name; + truncate table parted_copytest; + create index on parted_copytest (b); + drop trigger part_ins_trig on parted_copytest_a2; +-copy parted_copytest from stdin; ++\set command '\\copy parted_copytest from ' stdin; ++:command + -- Ensure index entries were properly added during the copy. + select * from parted_copytest where b = 1; + a | b | c +@@ -170,9 +179,9 @@ INFO: progress: {"type": "PIPE", "command": "COPY FROM", "relname": "tab_progre + -- Generate COPY FROM report with FILE, with some excluded tuples. + truncate tab_progress_reporting; + \set filename :abs_srcdir '/data/emp.data' +-copy tab_progress_reporting from :'filename' +- where (salary < 2000); +-INFO: progress: {"type": "FILE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": true, "tuples_excluded": 1, "tuples_processed": 2, "has_bytes_processed": true} ++\set command '\\copy tab_progress_reporting from ' :'filename' 'where (salary < 2000)'; ++:command ++INFO: progress: {"type": "PIPE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": false, "tuples_excluded": 1, "tuples_processed": 2, "has_bytes_processed": true} + drop trigger check_after_tab_progress_reporting on tab_progress_reporting; + drop function notice_after_tab_progress_reporting(); + drop table tab_progress_reporting; +@@ -281,7 +290,8 @@ CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1); + -- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org + -- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us + \set filename :abs_srcdir '/data/desc.data' +-COPY parted_si(id, data) FROM :'filename'; ++\set command '\\COPY parted_si(id, data) FROM ' :'filename'; ++:command + -- An earlier bug (see commit b1ecb9b3fcf) could end up using a buffer from + -- the wrong partition. This test is *not* guaranteed to trigger that bug, but + -- does so when shared_buffers is small enough. 
To test if we encountered the +diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out +index faf1a4d1b0..a44c97db52 100644 +--- a/src/test/regress/expected/copy2.out ++++ b/src/test/regress/expected/copy2.out +@@ -553,8 +553,8 @@ select * from check_con_tbl; + (2 rows) + + -- test with RLS enabled. +-CREATE ROLE regress_rls_copy_user; +-CREATE ROLE regress_rls_copy_user_colperms; ++CREATE ROLE regress_rls_copy_user PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_rls_copy_user_colperms PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE rls_t1 (a int, b int, c int); + COPY rls_t1 (a, b, c) from stdin; + CREATE POLICY p1 ON rls_t1 FOR SELECT USING (a % 2 = 0); +diff --git a/src/test/regress/expected/create_function_sql.out b/src/test/regress/expected/create_function_sql.out +index 50aca5940f..42527142f6 100644 +--- a/src/test/regress/expected/create_function_sql.out ++++ b/src/test/regress/expected/create_function_sql.out +@@ -4,7 +4,7 @@ + -- Assorted tests using SQL-language functions + -- + -- All objects made in this test are in temp_func_test schema +-CREATE USER regress_unpriv_user; ++CREATE USER regress_unpriv_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE SCHEMA temp_func_test; + GRANT ALL ON SCHEMA temp_func_test TO public; + SET search_path TO temp_func_test, public; +diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out +index acfd9d1f4f..0eeb64e47a 100644 +--- a/src/test/regress/expected/create_index.out ++++ b/src/test/regress/expected/create_index.out +@@ -51,7 +51,8 @@ CREATE TABLE fast_emp4000 ( + home_base box + ); + \set filename :abs_srcdir '/data/rect.data' +-COPY slow_emp4000 FROM :'filename'; ++\set command '\\copy slow_emp4000 FROM ' :'filename'; ++:command + INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000; + ANALYZE slow_emp4000; + ANALYZE fast_emp4000; +@@ -655,7 +656,8 @@ CREATE TABLE array_index_op_test ( + t text[] + ); + \set filename :abs_srcdir '/data/array.data' +-COPY array_index_op_test FROM :'filename'; ++\set command '\\copy array_index_op_test FROM ' :'filename'; ++:command + ANALYZE array_index_op_test; + SELECT * FROM array_index_op_test WHERE i = '{NULL}' ORDER BY seqno; + seqno | i | t +@@ -2822,7 +2824,7 @@ END; + -- concurrently + REINDEX SCHEMA CONCURRENTLY schema_to_reindex; + -- Failure for unauthorized user +-CREATE ROLE regress_reindexuser NOLOGIN; ++CREATE ROLE regress_reindexuser NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION ROLE regress_reindexuser; + REINDEX SCHEMA schema_to_reindex; + ERROR: must be owner of schema schema_to_reindex +diff --git a/src/test/regress/expected/create_procedure.out b/src/test/regress/expected/create_procedure.out +index 2177ba3509..ae3ca94d00 100644 +--- a/src/test/regress/expected/create_procedure.out ++++ b/src/test/regress/expected/create_procedure.out +@@ -421,7 +421,7 @@ ERROR: cp_testfunc1(integer) is not a procedure + DROP PROCEDURE nonexistent(); + ERROR: procedure nonexistent() does not exist + -- privileges +-CREATE USER regress_cp_user1; ++CREATE USER regress_cp_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT INSERT ON cp_test TO regress_cp_user1; + REVOKE EXECUTE ON PROCEDURE ptest1(text) FROM PUBLIC; + SET ROLE regress_cp_user1; +diff --git a/src/test/regress/expected/create_role.out b/src/test/regress/expected/create_role.out +index 46d4f9efe9..fc2a28a2f6 100644 +--- a/src/test/regress/expected/create_role.out ++++ b/src/test/regress/expected/create_role.out +@@ -1,28 +1,28 @@ + -- ok, 
superuser can create users with any set of privileges +-CREATE ROLE regress_role_super SUPERUSER; +-CREATE ROLE regress_role_admin CREATEDB CREATEROLE REPLICATION BYPASSRLS; ++CREATE ROLE regress_role_super SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_role_admin CREATEDB CREATEROLE REPLICATION BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT CREATE ON DATABASE regression TO regress_role_admin WITH GRANT OPTION; +-CREATE ROLE regress_role_limited_admin CREATEROLE; +-CREATE ROLE regress_role_normal; ++CREATE ROLE regress_role_limited_admin CREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_role_normal PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- fail, CREATEROLE user can't give away role attributes without having them + SET SESSION AUTHORIZATION regress_role_limited_admin; +-CREATE ROLE regress_nosuch_superuser SUPERUSER; ++CREATE ROLE regress_nosuch_superuser SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; + ERROR: permission denied to create role + DETAIL: Only roles with the SUPERUSER attribute may create roles with the SUPERUSER attribute. +-CREATE ROLE regress_nosuch_replication_bypassrls REPLICATION BYPASSRLS; ++CREATE ROLE regress_nosuch_replication_bypassrls REPLICATION BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; + ERROR: permission denied to create role + DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute. +-CREATE ROLE regress_nosuch_replication REPLICATION; ++CREATE ROLE regress_nosuch_replication REPLICATION PASSWORD NEON_PASSWORD_PLACEHOLDER; + ERROR: permission denied to create role + DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute. +-CREATE ROLE regress_nosuch_bypassrls BYPASSRLS; ++CREATE ROLE regress_nosuch_bypassrls BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; + ERROR: permission denied to create role + DETAIL: Only roles with the BYPASSRLS attribute may create roles with the BYPASSRLS attribute. +-CREATE ROLE regress_nosuch_createdb CREATEDB; ++CREATE ROLE regress_nosuch_createdb CREATEDB PASSWORD NEON_PASSWORD_PLACEHOLDER; + ERROR: permission denied to create role + DETAIL: Only roles with the CREATEDB attribute may create roles with the CREATEDB attribute. 
+ -- ok, can create a role without any special attributes +-CREATE ROLE regress_role_limited; ++CREATE ROLE regress_role_limited PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- fail, can't give it in any of the restricted attributes + ALTER ROLE regress_role_limited SUPERUSER; + ERROR: permission denied to alter role +@@ -39,10 +39,10 @@ DETAIL: Only roles with the BYPASSRLS attribute may change the BYPASSRLS attrib + DROP ROLE regress_role_limited; + -- ok, can give away these role attributes if you have them + SET SESSION AUTHORIZATION regress_role_admin; +-CREATE ROLE regress_replication_bypassrls REPLICATION BYPASSRLS; +-CREATE ROLE regress_replication REPLICATION; +-CREATE ROLE regress_bypassrls BYPASSRLS; +-CREATE ROLE regress_createdb CREATEDB; ++CREATE ROLE regress_replication_bypassrls REPLICATION BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_replication REPLICATION PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_bypassrls BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_createdb CREATEDB PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- ok, can toggle these role attributes off and on if you have them + ALTER ROLE regress_replication NOREPLICATION; + ALTER ROLE regress_replication REPLICATION; +@@ -58,48 +58,48 @@ ALTER ROLE regress_createdb NOSUPERUSER; + ERROR: permission denied to alter role + DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. + -- ok, having CREATEROLE is enough to create users with these privileges +-CREATE ROLE regress_createrole CREATEROLE NOINHERIT; ++CREATE ROLE regress_createrole CREATEROLE NOINHERIT PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT CREATE ON DATABASE regression TO regress_createrole WITH GRANT OPTION; +-CREATE ROLE regress_login LOGIN; +-CREATE ROLE regress_inherit INHERIT; +-CREATE ROLE regress_connection_limit CONNECTION LIMIT 5; +-CREATE ROLE regress_encrypted_password ENCRYPTED PASSWORD 'foo'; +-CREATE ROLE regress_password_null PASSWORD NULL; ++CREATE ROLE regress_login LOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_inherit INHERIT PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_connection_limit CONNECTION LIMIT 5 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_encrypted_password ENCRYPTED PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_password_null PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- ok, backwards compatible noise words should be ignored +-CREATE ROLE regress_noiseword SYSID 12345; ++CREATE ROLE regress_noiseword SYSID 12345 PASSWORD NEON_PASSWORD_PLACEHOLDER; + NOTICE: SYSID can no longer be specified + -- fail, cannot grant membership in superuser role +-CREATE ROLE regress_nosuch_super IN ROLE regress_role_super; ++CREATE ROLE regress_nosuch_super IN ROLE regress_role_super PASSWORD NEON_PASSWORD_PLACEHOLDER; + ERROR: permission denied to grant role "regress_role_super" + DETAIL: Only roles with the SUPERUSER attribute may grant roles with the SUPERUSER attribute. 
+ -- fail, database owner cannot have members +-CREATE ROLE regress_nosuch_dbowner IN ROLE pg_database_owner; ++CREATE ROLE regress_nosuch_dbowner IN ROLE pg_database_owner PASSWORD NEON_PASSWORD_PLACEHOLDER; + ERROR: role "pg_database_owner" cannot have explicit members + -- ok, can grant other users into a role + CREATE ROLE regress_inroles ROLE + regress_role_super, regress_createdb, regress_createrole, regress_login, +- regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; ++ regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- fail, cannot grant a role into itself +-CREATE ROLE regress_nosuch_recursive ROLE regress_nosuch_recursive; ++CREATE ROLE regress_nosuch_recursive ROLE regress_nosuch_recursive PASSWORD NEON_PASSWORD_PLACEHOLDER; + ERROR: role "regress_nosuch_recursive" is a member of role "regress_nosuch_recursive" + -- ok, can grant other users into a role with admin option + CREATE ROLE regress_adminroles ADMIN + regress_role_super, regress_createdb, regress_createrole, regress_login, +- regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; ++ regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- fail, cannot grant a role into itself with admin option +-CREATE ROLE regress_nosuch_admin_recursive ADMIN regress_nosuch_admin_recursive; ++CREATE ROLE regress_nosuch_admin_recursive ADMIN regress_nosuch_admin_recursive PASSWORD NEON_PASSWORD_PLACEHOLDER; + ERROR: role "regress_nosuch_admin_recursive" is a member of role "regress_nosuch_admin_recursive" + -- fail, regress_createrole does not have CREATEDB privilege + SET SESSION AUTHORIZATION regress_createrole; + CREATE DATABASE regress_nosuch_db; + ERROR: permission denied to create database + -- ok, regress_createrole can create new roles +-CREATE ROLE regress_plainrole; ++CREATE ROLE regress_plainrole PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- ok, roles with CREATEROLE can create new roles with it +-CREATE ROLE regress_rolecreator CREATEROLE; ++CREATE ROLE regress_rolecreator CREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- ok, roles with CREATEROLE can create new roles with different role + -- attributes, including CREATEROLE +-CREATE ROLE regress_hasprivs CREATEROLE LOGIN INHERIT CONNECTION LIMIT 5; ++CREATE ROLE regress_hasprivs CREATEROLE LOGIN INHERIT CONNECTION LIMIT 5 PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- ok, we should be able to modify a role we created + COMMENT ON ROLE regress_hasprivs IS 'some comment'; + ALTER ROLE regress_hasprivs RENAME TO regress_tenant; +@@ -141,7 +141,7 @@ ERROR: permission denied to reassign objects + DETAIL: Only roles with privileges of role "regress_tenant" may reassign objects owned by it. 
+ -- ok, create a role with a value for createrole_self_grant + SET createrole_self_grant = 'set, inherit'; +-CREATE ROLE regress_tenant2; ++CREATE ROLE regress_tenant2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT CREATE ON DATABASE regression TO regress_tenant2; + -- ok, regress_tenant2 can create objects within the database + SET SESSION AUTHORIZATION regress_tenant2; +@@ -165,34 +165,34 @@ ALTER TABLE tenant2_table OWNER TO regress_tenant2; + ERROR: must be able to SET ROLE "regress_tenant2" + DROP TABLE tenant2_table; + -- fail, CREATEROLE is not enough to create roles in privileged roles +-CREATE ROLE regress_read_all_data IN ROLE pg_read_all_data; ++CREATE ROLE regress_read_all_data PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_read_all_data; + ERROR: permission denied to grant role "pg_read_all_data" + DETAIL: Only roles with the ADMIN option on role "pg_read_all_data" may grant this role. +-CREATE ROLE regress_write_all_data IN ROLE pg_write_all_data; ++CREATE ROLE regress_write_all_data PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_write_all_data; + ERROR: permission denied to grant role "pg_write_all_data" + DETAIL: Only roles with the ADMIN option on role "pg_write_all_data" may grant this role. +-CREATE ROLE regress_monitor IN ROLE pg_monitor; ++CREATE ROLE regress_monitor PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_monitor; + ERROR: permission denied to grant role "pg_monitor" + DETAIL: Only roles with the ADMIN option on role "pg_monitor" may grant this role. +-CREATE ROLE regress_read_all_settings IN ROLE pg_read_all_settings; ++CREATE ROLE regress_read_all_settings PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_read_all_settings; + ERROR: permission denied to grant role "pg_read_all_settings" + DETAIL: Only roles with the ADMIN option on role "pg_read_all_settings" may grant this role. +-CREATE ROLE regress_read_all_stats IN ROLE pg_read_all_stats; ++CREATE ROLE regress_read_all_stats PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_read_all_stats; + ERROR: permission denied to grant role "pg_read_all_stats" + DETAIL: Only roles with the ADMIN option on role "pg_read_all_stats" may grant this role. +-CREATE ROLE regress_stat_scan_tables IN ROLE pg_stat_scan_tables; ++CREATE ROLE regress_stat_scan_tables PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_stat_scan_tables; + ERROR: permission denied to grant role "pg_stat_scan_tables" + DETAIL: Only roles with the ADMIN option on role "pg_stat_scan_tables" may grant this role. +-CREATE ROLE regress_read_server_files IN ROLE pg_read_server_files; ++CREATE ROLE regress_read_server_files PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_read_server_files; + ERROR: permission denied to grant role "pg_read_server_files" + DETAIL: Only roles with the ADMIN option on role "pg_read_server_files" may grant this role. +-CREATE ROLE regress_write_server_files IN ROLE pg_write_server_files; ++CREATE ROLE regress_write_server_files PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_write_server_files; + ERROR: permission denied to grant role "pg_write_server_files" + DETAIL: Only roles with the ADMIN option on role "pg_write_server_files" may grant this role. +-CREATE ROLE regress_execute_server_program IN ROLE pg_execute_server_program; ++CREATE ROLE regress_execute_server_program PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_execute_server_program; + ERROR: permission denied to grant role "pg_execute_server_program" + DETAIL: Only roles with the ADMIN option on role "pg_execute_server_program" may grant this role. 
+-CREATE ROLE regress_signal_backend IN ROLE pg_signal_backend; ++CREATE ROLE regress_signal_backend PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_signal_backend; + ERROR: permission denied to grant role "pg_signal_backend" + DETAIL: Only roles with the ADMIN option on role "pg_signal_backend" may grant this role. + -- fail, role still owns database objects +diff --git a/src/test/regress/expected/create_schema.out b/src/test/regress/expected/create_schema.out +index 93302a07ef..1a73f083ac 100644 +--- a/src/test/regress/expected/create_schema.out ++++ b/src/test/regress/expected/create_schema.out +@@ -2,7 +2,7 @@ + -- CREATE_SCHEMA + -- + -- Schema creation with elements. +-CREATE ROLE regress_create_schema_role SUPERUSER; ++CREATE ROLE regress_create_schema_role SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- Cases where schema creation fails as objects are qualified with a schema + -- that does not match with what's expected. + -- This checks all the object types that include schema qualifications. +diff --git a/src/test/regress/expected/create_view.out b/src/test/regress/expected/create_view.out +index f3f8c7b5a2..3e3e54ff4c 100644 +--- a/src/test/regress/expected/create_view.out ++++ b/src/test/regress/expected/create_view.out +@@ -18,7 +18,8 @@ CREATE TABLE real_city ( + outline path + ); + \set filename :abs_srcdir '/data/real_city.data' +-COPY real_city FROM :'filename'; ++\set command '\\copy real_city FROM ' :'filename'; ++:command + ANALYZE real_city; + SELECT * + INTO TABLE ramp +diff --git a/src/test/regress/expected/database.out b/src/test/regress/expected/database.out +index 454db91ec0..01378d7081 100644 +--- a/src/test/regress/expected/database.out ++++ b/src/test/regress/expected/database.out +@@ -1,8 +1,7 @@ + CREATE DATABASE regression_tbd + ENCODING utf8 LC_COLLATE "C" LC_CTYPE "C" TEMPLATE template0; + ALTER DATABASE regression_tbd RENAME TO regression_utf8; +-ALTER DATABASE regression_utf8 SET TABLESPACE regress_tblspace; +-ALTER DATABASE regression_utf8 RESET TABLESPACE; ++WARNING: you need to manually restart any running background workers after this command + ALTER DATABASE regression_utf8 CONNECTION_LIMIT 123; + -- Test PgDatabaseToastTable. Doing this with GRANT would be slow. 
+ BEGIN; +diff --git a/src/test/regress/expected/dependency.out b/src/test/regress/expected/dependency.out +index 6d9498cdd1..692cf979d0 100644 +--- a/src/test/regress/expected/dependency.out ++++ b/src/test/regress/expected/dependency.out +@@ -1,10 +1,10 @@ + -- + -- DEPENDENCIES + -- +-CREATE USER regress_dep_user; +-CREATE USER regress_dep_user2; +-CREATE USER regress_dep_user3; +-CREATE GROUP regress_dep_group; ++CREATE USER regress_dep_user PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_dep_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_dep_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE GROUP regress_dep_group PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE deptest (f1 serial primary key, f2 text); + GRANT SELECT ON TABLE deptest TO GROUP regress_dep_group; + GRANT ALL ON TABLE deptest TO regress_dep_user, regress_dep_user2; +@@ -41,9 +41,9 @@ ERROR: role "regress_dep_user3" cannot be dropped because some objects depend o + DROP TABLE deptest; + DROP USER regress_dep_user3; + -- Test DROP OWNED +-CREATE USER regress_dep_user0; +-CREATE USER regress_dep_user1; +-CREATE USER regress_dep_user2; ++CREATE USER regress_dep_user0 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_dep_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_dep_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION regress_dep_user0; + -- permission denied + DROP OWNED BY regress_dep_user1; +diff --git a/src/test/regress/expected/drop_if_exists.out b/src/test/regress/expected/drop_if_exists.out +index 5e44c2c3ce..eb3bb329fb 100644 +--- a/src/test/regress/expected/drop_if_exists.out ++++ b/src/test/regress/expected/drop_if_exists.out +@@ -64,9 +64,9 @@ ERROR: type "test_domain_exists" does not exist + --- + --- role/user/group + --- +-CREATE USER regress_test_u1; +-CREATE ROLE regress_test_r1; +-CREATE GROUP regress_test_g1; ++CREATE USER regress_test_u1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_test_r1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE GROUP regress_test_g1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + DROP USER regress_test_u2; + ERROR: role "regress_test_u2" does not exist + DROP USER IF EXISTS regress_test_u1, regress_test_u2; +diff --git a/src/test/regress/expected/equivclass.out b/src/test/regress/expected/equivclass.out +index 126f7047fe..0e2cc73426 100644 +--- a/src/test/regress/expected/equivclass.out ++++ b/src/test/regress/expected/equivclass.out +@@ -384,7 +384,7 @@ set enable_nestloop = on; + set enable_mergejoin = off; + alter table ec1 enable row level security; + create policy p1 on ec1 using (f1 < '5'::int8alias1); +-create user regress_user_ectest; ++create user regress_user_ectest PASSWORD NEON_PASSWORD_PLACEHOLDER; + grant select on ec0 to regress_user_ectest; + grant select on ec1 to regress_user_ectest; + -- without any RLS, we'll treat {a.ff, b.ff, 43} as an EquivalenceClass +diff --git a/src/test/regress/expected/event_trigger.out b/src/test/regress/expected/event_trigger.out +index 5a10958df5..a578c06ebd 100644 +--- a/src/test/regress/expected/event_trigger.out ++++ b/src/test/regress/expected/event_trigger.out +@@ -85,7 +85,7 @@ create event trigger regress_event_trigger2 on ddl_command_start + -- OK + comment on event trigger regress_event_trigger is 'test comment'; + -- drop as non-superuser should fail +-create role regress_evt_user; ++create role regress_evt_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + set role regress_evt_user; + create event trigger regress_event_trigger_noperms on ddl_command_start 
+ execute procedure test_event_trigger(); +diff --git a/src/test/regress/expected/foreign_data.out b/src/test/regress/expected/foreign_data.out +index 6ed50fdcfa..caa00a345d 100644 +--- a/src/test/regress/expected/foreign_data.out ++++ b/src/test/regress/expected/foreign_data.out +@@ -14,13 +14,13 @@ CREATE FUNCTION test_fdw_handler() + SET client_min_messages TO 'warning'; + DROP ROLE IF EXISTS regress_foreign_data_user, regress_test_role, regress_test_role2, regress_test_role_super, regress_test_indirect, regress_unprivileged_role; + RESET client_min_messages; +-CREATE ROLE regress_foreign_data_user LOGIN SUPERUSER; ++CREATE ROLE regress_foreign_data_user LOGIN SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION 'regress_foreign_data_user'; +-CREATE ROLE regress_test_role; +-CREATE ROLE regress_test_role2; +-CREATE ROLE regress_test_role_super SUPERUSER; +-CREATE ROLE regress_test_indirect; +-CREATE ROLE regress_unprivileged_role; ++CREATE ROLE regress_test_role PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_test_role2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_test_role_super SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_test_indirect PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_unprivileged_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE FOREIGN DATA WRAPPER dummy; + COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless'; + CREATE FOREIGN DATA WRAPPER postgresql VALIDATOR postgresql_fdw_validator; +diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out +index 12e523c737..8872e23935 100644 +--- a/src/test/regress/expected/foreign_key.out ++++ b/src/test/regress/expected/foreign_key.out +@@ -1968,7 +1968,7 @@ ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 + FOR VALUES IN (1600); + -- leave these tables around intentionally + -- test the case when the referenced table is owned by a different user +-create role regress_other_partitioned_fk_owner; ++create role regress_other_partitioned_fk_owner PASSWORD NEON_PASSWORD_PLACEHOLDER; + grant references on fk_notpartitioned_pk to regress_other_partitioned_fk_owner; + set role regress_other_partitioned_fk_owner; + create table other_partitioned_fk(a int, b int) partition by list (a); +diff --git a/src/test/regress/expected/generated.out b/src/test/regress/expected/generated.out +index 0f623f7119..b48588a54e 100644 +--- a/src/test/regress/expected/generated.out ++++ b/src/test/regress/expected/generated.out +@@ -534,7 +534,7 @@ CREATE TABLE gtest10a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STOR + ALTER TABLE gtest10a DROP COLUMN b; + INSERT INTO gtest10a (a) VALUES (1); + -- privileges +-CREATE USER regress_user11; ++CREATE USER regress_user11 PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE gtest11s (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (b * 2) STORED); + INSERT INTO gtest11s VALUES (1, 10), (2, 20); + GRANT SELECT (a, c) ON gtest11s TO regress_user11; +diff --git a/src/test/regress/expected/guc.out b/src/test/regress/expected/guc.out +index 127c953297..e6f8272f99 100644 +--- a/src/test/regress/expected/guc.out ++++ b/src/test/regress/expected/guc.out +@@ -584,7 +584,7 @@ PREPARE foo AS SELECT 1; + LISTEN foo_event; + SET vacuum_cost_delay = 13; + CREATE TEMP TABLE tmp_foo (data text) ON COMMIT DELETE ROWS; +-CREATE ROLE regress_guc_user; ++CREATE ROLE regress_guc_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION regress_guc_user; + -- look changes + 
SELECT pg_listening_channels(); +diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out +index a2036a1597..805d73b9d2 100644 +--- a/src/test/regress/expected/hash_index.out ++++ b/src/test/regress/expected/hash_index.out +@@ -20,10 +20,14 @@ CREATE TABLE hash_f8_heap ( + random float8 + ); + \set filename :abs_srcdir '/data/hash.data' +-COPY hash_i4_heap FROM :'filename'; +-COPY hash_name_heap FROM :'filename'; +-COPY hash_txt_heap FROM :'filename'; +-COPY hash_f8_heap FROM :'filename'; ++\set command '\\copy hash_i4_heap FROM ' :'filename'; ++:command ++\set command '\\copy hash_name_heap FROM ' :'filename'; ++:command ++\set command '\\copy hash_txt_heap FROM ' :'filename'; ++:command ++\set command '\\copy hash_f8_heap FROM ' :'filename'; ++:command + -- the data in this file has a lot of duplicates in the index key + -- fields, leading to long bucket chains and lots of table expansion. + -- this is therefore a stress test of the bucket overflow code (unlike +diff --git a/src/test/regress/expected/identity.out b/src/test/regress/expected/identity.out +index cc7772349f..98a08eb48d 100644 +--- a/src/test/regress/expected/identity.out ++++ b/src/test/regress/expected/identity.out +@@ -520,7 +520,7 @@ ALTER TABLE itest7 ALTER COLUMN a SET GENERATED BY DEFAULT; + ALTER TABLE itest7 ALTER COLUMN a RESTART; + ALTER TABLE itest7 ALTER COLUMN a DROP IDENTITY; + -- privileges +-CREATE USER regress_identity_user1; ++CREATE USER regress_identity_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE itest8 (a int GENERATED ALWAYS AS IDENTITY, b text); + GRANT SELECT, INSERT ON itest8 TO regress_identity_user1; + SET ROLE regress_identity_user1; +diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out +index 4943429e9b..0257f22b15 100644 +--- a/src/test/regress/expected/inherit.out ++++ b/src/test/regress/expected/inherit.out +@@ -2606,7 +2606,7 @@ create index on permtest_parent (left(c, 3)); + insert into permtest_parent + select 1, 'a', left(fipshash(i::text), 5) from generate_series(0, 100) i; + analyze permtest_parent; +-create role regress_no_child_access; ++create role regress_no_child_access PASSWORD NEON_PASSWORD_PLACEHOLDER; + revoke all on permtest_grandchild from regress_no_child_access; + grant select on permtest_parent to regress_no_child_access; + set session authorization regress_no_child_access; +diff --git a/src/test/regress/expected/insert.out b/src/test/regress/expected/insert.out +index cf4b5221a8..fa6ccb639c 100644 +--- a/src/test/regress/expected/insert.out ++++ b/src/test/regress/expected/insert.out +@@ -802,7 +802,7 @@ drop table mlparted5; + -- appropriate key description (or none) in various situations + create table key_desc (a int, b int) partition by list ((a+0)); + create table key_desc_1 partition of key_desc for values in (1) partition by range (b); +-create user regress_insert_other_user; ++create user regress_insert_other_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + grant select (a) on key_desc_1 to regress_insert_other_user; + grant insert on key_desc to regress_insert_other_user; + set role regress_insert_other_user; +@@ -914,7 +914,7 @@ DETAIL: Failing row contains (2, hi there). 
+ -- check that the message shows the appropriate column description in a + -- situation where the partitioned table is not the primary ModifyTable node + create table inserttest3 (f1 text default 'foo', f2 text default 'bar', f3 int); +-create role regress_coldesc_role; ++create role regress_coldesc_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + grant insert on inserttest3 to regress_coldesc_role; + grant insert on brtrigpartcon to regress_coldesc_role; + revoke select on brtrigpartcon from regress_coldesc_role; +diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out +index f8a7dac960..64dcaf171c 100644 +--- a/src/test/regress/expected/jsonb.out ++++ b/src/test/regress/expected/jsonb.out +@@ -4,7 +4,8 @@ CREATE TABLE testjsonb ( + j jsonb + ); + \set filename :abs_srcdir '/data/jsonb.data' +-COPY testjsonb FROM :'filename'; ++\set command '\\copy testjsonb FROM ' :'filename'; ++:command + -- Strings. + SELECT '""'::jsonb; -- OK. + jsonb +diff --git a/src/test/regress/expected/largeobject.out b/src/test/regress/expected/largeobject.out +index 4921dd79ae..d18a3cdd66 100644 +--- a/src/test/regress/expected/largeobject.out ++++ b/src/test/regress/expected/largeobject.out +@@ -7,7 +7,7 @@ + -- ensure consistent test output regardless of the default bytea format + SET bytea_output TO escape; + -- Test ALTER LARGE OBJECT OWNER +-CREATE ROLE regress_lo_user; ++CREATE ROLE regress_lo_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + SELECT lo_create(42); + lo_create + ----------- +@@ -346,7 +346,8 @@ SELECT lo_unlink(loid) from lotest_stash_values; + + TRUNCATE lotest_stash_values; + \set filename :abs_srcdir '/data/tenk.data' +-INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); ++\lo_import :filename ++INSERT INTO lotest_stash_values (loid) VALUES (:LASTOID); + BEGIN; + UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); + -- verify length of large object +@@ -410,12 +411,8 @@ SELECT lo_close(fd) FROM lotest_stash_values; + + END; + \set filename :abs_builddir '/results/lotest.txt' +-SELECT lo_export(loid, :'filename') FROM lotest_stash_values; +- lo_export +------------ +- 1 +-(1 row) +- ++SELECT loid FROM lotest_stash_values \gset ++\lo_export :loid, :filename + \lo_import :filename + \set newloid :LASTOID + -- just make sure \lo_export does not barf +diff --git a/src/test/regress/expected/lock.out b/src/test/regress/expected/lock.out +index ad137d3645..8dac447436 100644 +--- a/src/test/regress/expected/lock.out ++++ b/src/test/regress/expected/lock.out +@@ -16,7 +16,7 @@ CREATE VIEW lock_view3 AS SELECT * from lock_view2; + CREATE VIEW lock_view4 AS SELECT (select a from lock_tbl1a limit 1) from lock_tbl1; + CREATE VIEW lock_view5 AS SELECT * from lock_tbl1 where a in (select * from lock_tbl1a); + CREATE VIEW lock_view6 AS SELECT * from (select * from lock_tbl1) sub; +-CREATE ROLE regress_rol_lock1; ++CREATE ROLE regress_rol_lock1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + ALTER ROLE regress_rol_lock1 SET search_path = lock_schema1; + GRANT USAGE ON SCHEMA lock_schema1 TO regress_rol_lock1; + -- Try all valid lock options; also try omitting the optional TABLE keyword. 
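Two conventions recur throughout this patch and are worth calling out once. First, every CREATE ROLE / CREATE USER / CREATE GROUP gains a PASSWORD clause: roles created against the hosted control plane must carry a password, and the NEON_PASSWORD_PLACEHOLDER token is evidently meant to be replaced with a real password before the suite runs. Second, server-side file access is rewritten to its client-side psql equivalent, since the test client cannot reach the server filesystem of a remote cluster: COPY ... FROM :'filename' becomes a \copy command assembled in a psql variable, and lo_import()/lo_export() calls become the \lo_import/\lo_export metacommands. A minimal sketch of the \copy rewrite, with the table and path taken from the hash_index hunk above:

    -- build the full \copy command in a psql variable, then execute it;
    -- \copy streams the file from the client instead of the server
    \set filename :abs_srcdir '/data/hash.data'
    \set command '\\copy hash_i4_heap FROM ' :'filename';
    :command

psql concatenates the string literal and the quoted :'filename' value into the variable command; the bare :command line then runs the assembled \copy. The same rewrite appears below in the jsonb, misc, and psql expected files.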
+diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out +index 67a50bde3d..7eeafd2603 100644 +--- a/src/test/regress/expected/matview.out ++++ b/src/test/regress/expected/matview.out +@@ -549,7 +549,7 @@ SELECT * FROM mvtest_mv_v; + DROP TABLE mvtest_v CASCADE; + NOTICE: drop cascades to materialized view mvtest_mv_v + -- make sure running as superuser works when MV owned by another role (bug #11208) +-CREATE ROLE regress_user_mvtest; ++CREATE ROLE regress_user_mvtest PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET ROLE regress_user_mvtest; + -- this test case also checks for ambiguity in the queries issued by + -- refresh_by_match_merge(), by choosing column names that intentionally +@@ -615,7 +615,7 @@ HINT: Use the REFRESH MATERIALIZED VIEW command. + ROLLBACK; + -- INSERT privileges if relation owner is not allowed to insert. + CREATE SCHEMA matview_schema; +-CREATE USER regress_matview_user; ++CREATE USER regress_matview_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + ALTER DEFAULT PRIVILEGES FOR ROLE regress_matview_user + REVOKE INSERT ON TABLES FROM regress_matview_user; + GRANT ALL ON SCHEMA matview_schema TO public; +diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out +index bc9a59803f..5b9ddf0626 100644 +--- a/src/test/regress/expected/merge.out ++++ b/src/test/regress/expected/merge.out +@@ -1,9 +1,9 @@ + -- + -- MERGE + -- +-CREATE USER regress_merge_privs; +-CREATE USER regress_merge_no_privs; +-CREATE USER regress_merge_none; ++CREATE USER regress_merge_privs PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_merge_no_privs PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_merge_none PASSWORD NEON_PASSWORD_PLACEHOLDER; + DROP TABLE IF EXISTS target; + NOTICE: table "target" does not exist, skipping + DROP TABLE IF EXISTS source; +diff --git a/src/test/regress/expected/misc.out b/src/test/regress/expected/misc.out +index 6e816c57f1..6ef45b468e 100644 +--- a/src/test/regress/expected/misc.out ++++ b/src/test/regress/expected/misc.out +@@ -59,9 +59,11 @@ DROP TABLE tmp; + -- copy + -- + \set filename :abs_builddir '/results/onek.data' +-COPY onek TO :'filename'; ++\set command '\\copy onek TO ' :'filename'; ++:command + CREATE TEMP TABLE onek_copy (LIKE onek); +-COPY onek_copy FROM :'filename'; ++\set command '\\copy onek_copy FROM ' :'filename'; ++:command + SELECT * FROM onek EXCEPT ALL SELECT * FROM onek_copy; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 + ---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- +@@ -73,9 +75,11 @@ SELECT * FROM onek_copy EXCEPT ALL SELECT * FROM onek; + (0 rows) + + \set filename :abs_builddir '/results/stud_emp.data' +-COPY BINARY stud_emp TO :'filename'; ++\set command '\\COPY BINARY stud_emp TO ' :'filename'; ++:command + CREATE TEMP TABLE stud_emp_copy (LIKE stud_emp); +-COPY BINARY stud_emp_copy FROM :'filename'; ++\set command '\\COPY BINARY stud_emp_copy FROM ' :'filename'; ++:command + SELECT * FROM stud_emp_copy; + name | age | location | salary | manager | gpa | percent + -------+-----+------------+--------+---------+-----+--------- +diff --git a/src/test/regress/expected/misc_functions.out b/src/test/regress/expected/misc_functions.out +index c669948370..47111b1d24 100644 +--- a/src/test/regress/expected/misc_functions.out ++++ 
b/src/test/regress/expected/misc_functions.out +@@ -297,7 +297,7 @@ SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity + t + (1 row) + +-CREATE ROLE regress_log_memory; ++CREATE ROLE regress_log_memory PASSWORD NEON_PASSWORD_PLACEHOLDER; + SELECT has_function_privilege('regress_log_memory', + 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- no + has_function_privilege +@@ -483,7 +483,7 @@ select count(*) > 0 from + -- + -- Test replication slot directory functions + -- +-CREATE ROLE regress_slot_dir_funcs; ++CREATE ROLE regress_slot_dir_funcs PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- Not available by default. + SELECT has_function_privilege('regress_slot_dir_funcs', + 'pg_ls_logicalsnapdir()', 'EXECUTE'); +diff --git a/src/test/regress/expected/object_address.out b/src/test/regress/expected/object_address.out +index fc42d418bf..e38f517574 100644 +--- a/src/test/regress/expected/object_address.out ++++ b/src/test/regress/expected/object_address.out +@@ -5,7 +5,7 @@ + SET client_min_messages TO 'warning'; + DROP ROLE IF EXISTS regress_addr_user; + RESET client_min_messages; +-CREATE USER regress_addr_user; ++CREATE USER regress_addr_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- Test generic object addressing/identification functions + CREATE SCHEMA addr_nsp; + SET search_path TO 'addr_nsp'; +diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out +index 8475231735..1afae5395f 100644 +--- a/src/test/regress/expected/password.out ++++ b/src/test/regress/expected/password.out +@@ -12,11 +12,11 @@ SET password_encryption = 'md5'; -- ok + SET password_encryption = 'scram-sha-256'; -- ok + -- consistency of password entries + SET password_encryption = 'md5'; +-CREATE ROLE regress_passwd1 PASSWORD 'role_pwd1'; +-CREATE ROLE regress_passwd2 PASSWORD 'role_pwd2'; ++CREATE ROLE regress_passwd1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET password_encryption = 'scram-sha-256'; +-CREATE ROLE regress_passwd3 PASSWORD 'role_pwd3'; +-CREATE ROLE regress_passwd4 PASSWORD NULL; ++CREATE ROLE regress_passwd3 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- check list of created entries + -- + -- The scram secret will look something like: +@@ -30,10 +30,10 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+ + ORDER BY rolname, rolpassword; + rolname | rolpassword_masked + -----------------+--------------------------------------------------- +- regress_passwd1 | md5783277baca28003b33453252be4dbb34 +- regress_passwd2 | md54044304ba511dd062133eb5b4b84a2a3 ++ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1 ++ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2 + regress_passwd3 | SCRAM-SHA-256$4096:$: +- regress_passwd4 | ++ regress_passwd4 | SCRAM-SHA-256$4096:$: + (4 rows) + + -- Rename a role +@@ -54,24 +54,30 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2; + -- passwords. 
+ SET password_encryption = 'md5'; + -- encrypt with MD5 +-ALTER ROLE regress_passwd2 PASSWORD 'foo'; ++ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- already encrypted, use as they are + ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70'; ++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"} + ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo='; ++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"} + SET password_encryption = 'scram-sha-256'; + -- create SCRAM secret +-ALTER ROLE regress_passwd4 PASSWORD 'foo'; ++ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- already encrypted with MD5, use as it is + CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023'; ++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"} + -- This looks like a valid SCRAM-SHA-256 secret, but it is not + -- so it should be hashed with SCRAM-SHA-256. + CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234'; ++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"} + -- These may look like valid MD5 secrets, but they are not, so they + -- should be hashed with SCRAM-SHA-256. + -- trailing garbage at the end + CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz'; ++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"} + -- invalid length + CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz'; ++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"} + -- Changing the SCRAM iteration count + SET scram_iterations = 1024; + CREATE ROLE regress_passwd9 PASSWORD 'alterediterationcount'; +@@ -81,63 +87,67 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+ + ORDER BY rolname, rolpassword; + rolname | rolpassword_masked + -----------------+--------------------------------------------------- +- regress_passwd1 | md5cd3578025fe2c3d7ed1b9a9b26238b70 +- regress_passwd2 | md5dfa155cadd5f4ad57860162f3fab9cdb ++ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1 ++ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2 + regress_passwd3 | SCRAM-SHA-256$4096:$: + regress_passwd4 | SCRAM-SHA-256$4096:$: +- regress_passwd5 | md5e73a4b11df52a6068f8b39f90be36023 +- regress_passwd6 | SCRAM-SHA-256$4096:$: +- regress_passwd7 | SCRAM-SHA-256$4096:$: +- regress_passwd8 | SCRAM-SHA-256$4096:$: + regress_passwd9 | SCRAM-SHA-256$1024:$: +-(9 rows) ++(5 rows) + + -- An empty password is not allowed, in any form + CREATE ROLE regress_passwd_empty PASSWORD ''; + NOTICE: empty string is not a valid password, clearing password ++ERROR: Failed to get encrypted password: User "regress_passwd_empty" has no password assigned. 
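The new ERROR lines in this hunk come from a control-plane restriction rather than from Postgres itself: Neon manages role passwords outside the database, so it accepts only plaintext passwords and rejects pre-hashed MD5 or SCRAM secrets with an HTTP 400. A hedged illustration (the role name is hypothetical; the MD5 literal is the one from the regress_passwd5 case above):

    -- rejected: the secret is already hashed
    CREATE ROLE demo_passwd PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
    -- ERROR: Received HTTP code 400 from control plane:
    --   {"error":"Neon only supports being given plaintext passwords"}

    -- accepted: a plaintext password, stored server-side as a SCRAM secret
    CREATE ROLE demo_passwd PASSWORD 'a-plaintext-secret';

Because the rejected statements fail outright, those roles are never created; that is why the expected output later shows "role ... does not exist" for the corresponding DROP ROLE statements and why the re-hashing check below returns zero rows.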
+ ALTER ROLE regress_passwd_empty PASSWORD 'md585939a5ce845f1a1b620742e3c659e0a'; +-NOTICE: empty string is not a valid password, clearing password ++ERROR: role "regress_passwd_empty" does not exist + ALTER ROLE regress_passwd_empty PASSWORD 'SCRAM-SHA-256$4096:hpFyHTUsSWcR7O9P$LgZFIt6Oqdo27ZFKbZ2nV+vtnYM995pDh9ca6WSi120=:qVV5NeluNfUPkwm7Vqat25RjSPLkGeoZBQs6wVv+um4='; +-NOTICE: empty string is not a valid password, clearing password ++ERROR: role "regress_passwd_empty" does not exist + SELECT rolpassword FROM pg_authid WHERE rolname='regress_passwd_empty'; + rolpassword + ------------- +- +-(1 row) ++(0 rows) + + -- Test with invalid stored and server keys. + -- + -- The first is valid, to act as a control. The others have too long + -- stored/server keys. They will be re-hashed. + CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI='; ++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"} + CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI='; ++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"} + CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA='; ++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"} + -- Check that the invalid secrets were re-hashed. A re-hashed secret + -- should not contain the original salt. 
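A note on the masked rolpassword values in these files: a SCRAM secret embeds a random salt, so its exact text changes on every run. The patch therefore wraps rolpassword in a regexp_replace that keeps only the stable scheme-and-iteration prefix, and MD5 entries are likewise replaced with per-role NEON_MD5_PLACEHOLDER_<rolname> tokens. The masking expression used throughout:

    SELECT rolname,
           regexp_replace(rolpassword,
                          '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)',
                          '\1$\2:$:') AS rolpassword_masked
      FROM pg_authid
     ORDER BY rolname;

Everything after the iteration count (salt, stored key, server key) collapses away, leaving a deterministic value such as SCRAM-SHA-256$4096:$: that is stable across runs.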
+ SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed + FROM pg_authid + WHERE rolname LIKE 'regress_passwd_sha_len%' + ORDER BY rolname; +- rolname | is_rolpassword_rehashed +--------------------------+------------------------- +- regress_passwd_sha_len0 | f +- regress_passwd_sha_len1 | t +- regress_passwd_sha_len2 | t +-(3 rows) ++ rolname | is_rolpassword_rehashed ++---------+------------------------- ++(0 rows) + + DROP ROLE regress_passwd1; + DROP ROLE regress_passwd2; + DROP ROLE regress_passwd3; + DROP ROLE regress_passwd4; + DROP ROLE regress_passwd5; ++ERROR: role "regress_passwd5" does not exist + DROP ROLE regress_passwd6; ++ERROR: role "regress_passwd6" does not exist + DROP ROLE regress_passwd7; ++ERROR: role "regress_passwd7" does not exist + DROP ROLE regress_passwd8; ++ERROR: role "regress_passwd8" does not exist + DROP ROLE regress_passwd9; + DROP ROLE regress_passwd_empty; ++ERROR: role "regress_passwd_empty" does not exist + DROP ROLE regress_passwd_sha_len0; ++ERROR: role "regress_passwd_sha_len0" does not exist + DROP ROLE regress_passwd_sha_len1; ++ERROR: role "regress_passwd_sha_len1" does not exist + DROP ROLE regress_passwd_sha_len2; ++ERROR: role "regress_passwd_sha_len2" does not exist + -- all entries should have been removed + SELECT rolname, rolpassword + FROM pg_authid +diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out +index fbb0489a4f..2905194e2c 100644 +--- a/src/test/regress/expected/privileges.out ++++ b/src/test/regress/expected/privileges.out +@@ -20,19 +20,19 @@ SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3 + + RESET client_min_messages; + -- test proper begins here +-CREATE USER regress_priv_user1; +-CREATE USER regress_priv_user2; +-CREATE USER regress_priv_user3; +-CREATE USER regress_priv_user4; +-CREATE USER regress_priv_user5; +-CREATE USER regress_priv_user5; -- duplicate ++CREATE USER regress_priv_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user4 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user5 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user5 PASSWORD NEON_PASSWORD_PLACEHOLDER; -- duplicate + ERROR: role "regress_priv_user5" already exists +-CREATE USER regress_priv_user6; +-CREATE USER regress_priv_user7; +-CREATE USER regress_priv_user8; +-CREATE USER regress_priv_user9; +-CREATE USER regress_priv_user10; +-CREATE ROLE regress_priv_role; ++CREATE USER regress_priv_user6 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user7 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user8 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user9 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user10 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_priv_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- circular ADMIN OPTION grants should be disallowed + GRANT regress_priv_user1 TO regress_priv_user2 WITH ADMIN OPTION; + GRANT regress_priv_user1 TO regress_priv_user3 WITH ADMIN OPTION GRANTED BY regress_priv_user2; +@@ -108,11 +108,11 @@ ERROR: role "regress_priv_user5" cannot be dropped because some objects depend + DETAIL: privileges for membership of role regress_priv_user6 in role regress_priv_user1 + DROP ROLE regress_priv_user1, regress_priv_user5; -- ok, despite order + 
-- recreate the roles we just dropped +-CREATE USER regress_priv_user1; +-CREATE USER regress_priv_user2; +-CREATE USER regress_priv_user3; +-CREATE USER regress_priv_user4; +-CREATE USER regress_priv_user5; ++CREATE USER regress_priv_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user4 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user5 PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT pg_read_all_data TO regress_priv_user6; + GRANT pg_write_all_data TO regress_priv_user7; + GRANT pg_read_all_settings TO regress_priv_user8 WITH ADMIN OPTION; +@@ -145,8 +145,8 @@ REVOKE pg_read_all_settings FROM regress_priv_user8; + DROP USER regress_priv_user10; + DROP USER regress_priv_user9; + DROP USER regress_priv_user8; +-CREATE GROUP regress_priv_group1; +-CREATE GROUP regress_priv_group2 WITH ADMIN regress_priv_user1 USER regress_priv_user2; ++CREATE GROUP regress_priv_group1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE GROUP regress_priv_group2 WITH ADMIN regress_priv_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER USER regress_priv_user2; + ALTER GROUP regress_priv_group1 ADD USER regress_priv_user4; + GRANT regress_priv_group2 TO regress_priv_user2 GRANTED BY regress_priv_user1; + SET SESSION AUTHORIZATION regress_priv_user1; +@@ -172,12 +172,16 @@ GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY regre + ERROR: permission denied to grant privileges as role "regress_priv_role" + DETAIL: The grantor must have the ADMIN option on role "regress_priv_role". + GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY CURRENT_ROLE; ++ERROR: permission denied to grant privileges as role "neondb_owner" ++DETAIL: The grantor must have the ADMIN option on role "regress_priv_role". 
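The neondb_owner mentions in the added ERROR and WARNING lines reflect how the suite connects here: the session role appears to be the database owner, neondb_owner, rather than a bootstrap superuser, so GRANT ... GRANTED BY CURRENT_ROLE and the matching REVOKE resolve the grantor to neondb_owner, which lacks the ADMIN option that the upstream test takes for granted.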
+ REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY foo; -- error + ERROR: role "foo" does not exist + REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY regress_priv_user2; -- warning, noop + WARNING: role "regress_priv_user1" has not been granted membership in role "regress_priv_role" by role "regress_priv_user2" + REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY CURRENT_USER; ++WARNING: role "regress_priv_user1" has not been granted membership in role "regress_priv_role" by role "neondb_owner" + REVOKE regress_priv_role FROM regress_priv_user1 GRANTED BY CURRENT_ROLE; ++WARNING: role "regress_priv_user1" has not been granted membership in role "regress_priv_role" by role "neondb_owner" + DROP ROLE regress_priv_role; + SET SESSION AUTHORIZATION regress_priv_user1; + SELECT session_user, current_user; +@@ -1709,7 +1713,7 @@ SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OP + + -- security-restricted operations + \c - +-CREATE ROLE regress_sro_user; ++CREATE ROLE regress_sro_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- Check that index expressions and predicates are run as the table's owner + -- A dummy index function checking current_user + CREATE FUNCTION sro_ifun(int) RETURNS int AS $$ +@@ -2601,8 +2605,8 @@ drop cascades to function testns.priv_testagg(integer) + drop cascades to function testns.priv_testproc(integer) + -- Change owner of the schema & and rename of new schema owner + \c - +-CREATE ROLE regress_schemauser1 superuser login; +-CREATE ROLE regress_schemauser2 superuser login; ++CREATE ROLE regress_schemauser1 superuser login PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_schemauser2 superuser login PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION ROLE regress_schemauser1; + CREATE SCHEMA testns; + SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid; +@@ -2725,7 +2729,7 @@ DROP USER regress_priv_user7; + DROP USER regress_priv_user8; -- does not exist + ERROR: role "regress_priv_user8" does not exist + -- permissions with LOCK TABLE +-CREATE USER regress_locktable_user; ++CREATE USER regress_locktable_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE lock_table (a int); + -- LOCK TABLE and SELECT permission + GRANT SELECT ON lock_table TO regress_locktable_user; +@@ -2807,7 +2811,7 @@ DROP USER regress_locktable_user; + -- pg_backend_memory_contexts. 
+ -- switch to superuser + \c - +-CREATE ROLE regress_readallstats; ++CREATE ROLE regress_readallstats PASSWORD NEON_PASSWORD_PLACEHOLDER; + SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no + has_table_privilege + --------------------- +@@ -2851,10 +2855,10 @@ RESET ROLE; + -- clean up + DROP ROLE regress_readallstats; + -- test role grantor machinery +-CREATE ROLE regress_group; +-CREATE ROLE regress_group_direct_manager; +-CREATE ROLE regress_group_indirect_manager; +-CREATE ROLE regress_group_member; ++CREATE ROLE regress_group PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_group_direct_manager PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_group_indirect_manager PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_group_member PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE; + GRANT regress_group_direct_manager TO regress_group_indirect_manager; + SET SESSION AUTHORIZATION regress_group_direct_manager; +@@ -2883,9 +2887,9 @@ DROP ROLE regress_group_direct_manager; + DROP ROLE regress_group_indirect_manager; + DROP ROLE regress_group_member; + -- test SET and INHERIT options with object ownership changes +-CREATE ROLE regress_roleoption_protagonist; +-CREATE ROLE regress_roleoption_donor; +-CREATE ROLE regress_roleoption_recipient; ++CREATE ROLE regress_roleoption_protagonist PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_roleoption_donor PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_roleoption_recipient PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE SCHEMA regress_roleoption; + GRANT CREATE, USAGE ON SCHEMA regress_roleoption TO PUBLIC; + GRANT regress_roleoption_donor TO regress_roleoption_protagonist WITH INHERIT TRUE, SET FALSE; +diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out +index 7cd0c27cca..d7a124ed68 100644 +--- a/src/test/regress/expected/psql.out ++++ b/src/test/regress/expected/psql.out +@@ -2857,7 +2857,7 @@ Type | func + -- check conditional am display + \pset expanded off + CREATE SCHEMA tableam_display; +-CREATE ROLE regress_display_role; ++CREATE ROLE regress_display_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + ALTER SCHEMA tableam_display OWNER TO regress_display_role; + SET search_path TO tableam_display; + CREATE ACCESS METHOD heap_psql TYPE TABLE HANDLER heap_tableam_handler; +@@ -4808,7 +4808,7 @@ last error message: division by zero + last error code: 22012 + \unset FETCH_COUNT + create schema testpart; +-create role regress_partitioning_role; ++create role regress_partitioning_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + alter schema testpart owner to regress_partitioning_role; + set role to regress_partitioning_role; + -- run test inside own schema and hide other partitions +@@ -5260,7 +5260,7 @@ reset work_mem; + + -- check \df+ + -- we have to use functions with a predictable owner name, so make a role +-create role regress_psql_user superuser; ++create role regress_psql_user superuser PASSWORD NEON_PASSWORD_PLACEHOLDER; + begin; + set session authorization regress_psql_user; + create function psql_df_internal (float8) +@@ -5544,11 +5544,14 @@ CREATE TEMPORARY TABLE reload_output( + line text + ); + SELECT 1 AS a \g :g_out_file +-COPY reload_output(line) FROM :'g_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + SELECT 2 AS b\; SELECT 3 AS c\; SELECT 4 AS d \g :g_out_file +-COPY reload_output(line) FROM :'g_out_file'; 
++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + COPY (SELECT 'foo') TO STDOUT \; COPY (SELECT 'bar') TO STDOUT \g :g_out_file +-COPY reload_output(line) FROM :'g_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + SELECT line FROM reload_output ORDER BY lineno; + line + --------- +@@ -5587,13 +5590,15 @@ SELECT 1 AS a\; SELECT 2 AS b\; SELECT 3 AS c; + -- COPY TO file + -- The data goes to :g_out_file and the status to :o_out_file + \set QUIET false +-COPY (SELECT unique1 FROM onek ORDER BY unique1 LIMIT 10) TO :'g_out_file'; ++\set command '\\COPY (SELECT unique1 FROM onek ORDER BY unique1 LIMIT 10) TO ' :'g_out_file'; ++:command + -- DML command status + UPDATE onek SET unique1 = unique1 WHERE false; + \set QUIET true + \o + -- Check the contents of the files generated. +-COPY reload_output(line) FROM :'g_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + SELECT line FROM reload_output ORDER BY lineno; + line + ------ +@@ -5610,7 +5615,8 @@ SELECT line FROM reload_output ORDER BY lineno; + (10 rows) + + TRUNCATE TABLE reload_output; +-COPY reload_output(line) FROM :'o_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'o_out_file'; ++:command + SELECT line FROM reload_output ORDER BY lineno; + line + ---------- +@@ -5647,7 +5653,8 @@ COPY (SELECT 'foo1') TO STDOUT \; COPY (SELECT 'bar1') TO STDOUT; + COPY (SELECT 'foo2') TO STDOUT \; COPY (SELECT 'bar2') TO STDOUT \g :g_out_file + \o + -- Check the contents of the files generated. +-COPY reload_output(line) FROM :'g_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + SELECT line FROM reload_output ORDER BY lineno; + line + ------ +@@ -5656,7 +5663,8 @@ SELECT line FROM reload_output ORDER BY lineno; + (2 rows) + + TRUNCATE TABLE reload_output; +-COPY reload_output(line) FROM :'o_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'o_out_file'; ++:command + SELECT line FROM reload_output ORDER BY lineno; + line + ------ +@@ -6619,10 +6627,10 @@ cross-database references are not implemented: "no.such.database"."no.such.schem + \dX "no.such.database"."no.such.schema"."no.such.extended.statistics" + cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.extended.statistics" + -- check \drg and \du +-CREATE ROLE regress_du_role0; +-CREATE ROLE regress_du_role1; +-CREATE ROLE regress_du_role2; +-CREATE ROLE regress_du_admin; ++CREATE ROLE regress_du_role0 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_du_role1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_du_role2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_du_admin PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT regress_du_role0 TO regress_du_admin WITH ADMIN TRUE; + GRANT regress_du_role1 TO regress_du_admin WITH ADMIN TRUE; + GRANT regress_du_role2 TO regress_du_admin WITH ADMIN TRUE; +diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out +index 69dc6cfd85..68390cc18a 100644 +--- a/src/test/regress/expected/publication.out ++++ b/src/test/regress/expected/publication.out +@@ -1,9 +1,9 @@ + -- + -- PUBLICATION + -- +-CREATE ROLE regress_publication_user LOGIN SUPERUSER; +-CREATE ROLE regress_publication_user2; +-CREATE ROLE regress_publication_user_dummy LOGIN NOSUPERUSER; ++CREATE ROLE regress_publication_user LOGIN SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_publication_user2 PASSWORD 
NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_publication_user_dummy LOGIN NOSUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION 'regress_publication_user'; + -- suppress warning that depends on wal_level + SET client_min_messages = 'ERROR'; +@@ -1211,7 +1211,7 @@ ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- ok + DROP PUBLICATION testpub2; + DROP PUBLICATION testpub3; + SET ROLE regress_publication_user; +-CREATE ROLE regress_publication_user3; ++CREATE ROLE regress_publication_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT regress_publication_user2 TO regress_publication_user3; + SET client_min_messages = 'ERROR'; + CREATE PUBLICATION testpub4 FOR TABLES IN SCHEMA pub_test; +diff --git a/src/test/regress/expected/regproc.out b/src/test/regress/expected/regproc.out +index a9420850b8..bd3b5f312d 100644 +--- a/src/test/regress/expected/regproc.out ++++ b/src/test/regress/expected/regproc.out +@@ -2,7 +2,7 @@ + -- regproc + -- + /* If objects exist, return oids */ +-CREATE ROLE regress_regrole_test; ++CREATE ROLE regress_regrole_test PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- without schemaname + SELECT regoper('||/'); + regoper +diff --git a/src/test/regress/expected/roleattributes.out b/src/test/regress/expected/roleattributes.out +index 5e6969b173..2c4d52237f 100644 +--- a/src/test/regress/expected/roleattributes.out ++++ b/src/test/regress/expected/roleattributes.out +@@ -1,233 +1,233 @@ + -- default for superuser is false +-CREATE ROLE regress_test_def_superuser; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_superuser'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_def_superuser | f | t | f | f | f | f | f | -1 | | ++CREATE ROLE regress_test_def_superuser PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_superuser'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_def_superuser | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + +-CREATE ROLE regress_test_superuser WITH SUPERUSER; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_superuser | t | t | f | f | f | f | f | -1 | | ++CREATE ROLE regress_test_superuser WITH SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_superuser | t | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + + ALTER ROLE regress_test_superuser WITH NOSUPERUSER; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_superuser | f | t | f | f | f | f | f | -1 | | ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_superuser | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + + ALTER ROLE regress_test_superuser WITH SUPERUSER; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_superuser | t | t | f | f | f | f | f | -1 | | ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; ++ rolname | rolsuper | rolinherit | 
rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_superuser | t | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + + -- default for inherit is true +-CREATE ROLE regress_test_def_inherit; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_inherit'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +---------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_def_inherit | f | t | f | f | f | f | f | -1 | | ++CREATE ROLE regress_test_def_inherit PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_inherit'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_def_inherit | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + +-CREATE ROLE regress_test_inherit WITH NOINHERIT; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_inherit | f | f | f | f | f | f | f | -1 | | ++CREATE ROLE regress_test_inherit WITH NOINHERIT PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_inherit | f | f | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + + ALTER ROLE regress_test_inherit WITH INHERIT; +-SELECT rolname, 
rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_inherit | f | t | f | f | f | f | f | -1 | | ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_inherit | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + + ALTER ROLE regress_test_inherit WITH NOINHERIT; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_inherit | f | f | f | f | f | f | f | -1 | | ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_inherit | f | f | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + + -- default for create role is false +-CREATE ROLE regress_test_def_createrole; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createrole'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_def_createrole | f | t | f | f | f | f | f | -1 | | ++CREATE ROLE regress_test_def_createrole PASSWORD NEON_PASSWORD_PLACEHOLDER; 
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createrole'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_def_createrole | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + +-CREATE ROLE regress_test_createrole WITH CREATEROLE; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_createrole | f | t | t | f | f | f | f | -1 | | ++CREATE ROLE regress_test_createrole WITH CREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil ++-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+--------------- ++ regress_test_createrole | f | t | t | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: | + (1 row) + + ALTER ROLE regress_test_createrole WITH NOCREATEROLE; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; +- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- +- regress_test_createrole | f | t | f | f | f | f | f | -1 | | ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; ++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil 
++-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_createrole | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER ROLE regress_test_createrole WITH CREATEROLE;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_createrole | f | t | t | f | f | f | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_createrole | f | t | t | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ -- default for create database is false
+-CREATE ROLE regress_test_def_createdb;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createdb';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_def_createdb | f | t | f | f | f | f | f | -1 | |
++CREATE ROLE regress_test_def_createdb PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createdb';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++---------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_def_createdb | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+-CREATE ROLE regress_test_createdb WITH CREATEDB;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_createdb | f | t | f | t | f | f | f | -1 | |
++CREATE ROLE regress_test_createdb WITH CREATEDB PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++-----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_createdb | f | t | f | t | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER ROLE regress_test_createdb WITH NOCREATEDB;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_createdb | f | t | f | f | f | f | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++-----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_createdb | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER ROLE regress_test_createdb WITH CREATEDB;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_createdb | f | t | f | t | f | f | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++-----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_createdb | f | t | f | t | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ -- default for can login is false for role
+-CREATE ROLE regress_test_def_role_canlogin;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_role_canlogin';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+---------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_def_role_canlogin | f | t | f | f | f | f | f | -1 | |
++CREATE ROLE regress_test_def_role_canlogin PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_role_canlogin';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++--------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_def_role_canlogin | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+-CREATE ROLE regress_test_role_canlogin WITH LOGIN;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_role_canlogin | f | t | f | f | t | f | f | -1 | |
++CREATE ROLE regress_test_role_canlogin WITH LOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_role_canlogin | f | t | f | f | t | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER ROLE regress_test_role_canlogin WITH NOLOGIN;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_role_canlogin | f | t | f | f | f | f | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_role_canlogin | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER ROLE regress_test_role_canlogin WITH LOGIN;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_role_canlogin | f | t | f | f | t | f | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_role_canlogin | f | t | f | f | t | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ -- default for can login is true for user
+-CREATE USER regress_test_def_user_canlogin;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_user_canlogin';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+---------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_def_user_canlogin | f | t | f | f | t | f | f | -1 | |
++CREATE USER regress_test_def_user_canlogin PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_user_canlogin';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++--------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_def_user_canlogin | f | t | f | f | t | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+-CREATE USER regress_test_user_canlogin WITH NOLOGIN;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_user_canlogin | f | t | f | f | f | f | f | -1 | |
++CREATE USER regress_test_user_canlogin WITH NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_user_canlogin | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER USER regress_test_user_canlogin WITH LOGIN;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_user_canlogin | f | t | f | f | t | f | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_user_canlogin | f | t | f | f | t | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER USER regress_test_user_canlogin WITH NOLOGIN;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_user_canlogin | f | t | f | f | f | f | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_user_canlogin | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ -- default for replication is false
+-CREATE ROLE regress_test_def_replication;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_replication';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_def_replication | f | t | f | f | f | f | f | -1 | |
++CREATE ROLE regress_test_def_replication PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_replication';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_def_replication | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+-CREATE ROLE regress_test_replication WITH REPLICATION;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+---------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_replication | f | t | f | f | f | t | f | -1 | |
++CREATE ROLE regress_test_replication WITH REPLICATION PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_replication | f | t | f | f | f | t | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER ROLE regress_test_replication WITH NOREPLICATION;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+---------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_replication | f | t | f | f | f | f | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_replication | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER ROLE regress_test_replication WITH REPLICATION;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+---------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_replication | f | t | f | f | f | t | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_replication | f | t | f | f | f | t | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ -- default for bypassrls is false
+-CREATE ROLE regress_test_def_bypassrls;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_bypassrls';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_def_bypassrls | f | t | f | f | f | f | f | -1 | |
++CREATE ROLE regress_test_def_bypassrls PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_bypassrls';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_def_bypassrls | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+-CREATE ROLE regress_test_bypassrls WITH BYPASSRLS;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_bypassrls | f | t | f | f | f | f | t | -1 | |
++CREATE ROLE regress_test_bypassrls WITH BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER;
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_bypassrls | f | t | f | f | f | f | t | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER ROLE regress_test_bypassrls WITH NOBYPASSRLS;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_bypassrls | f | t | f | f | f | f | f | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_bypassrls | f | t | f | f | f | f | f | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ ALTER ROLE regress_test_bypassrls WITH BYPASSRLS;
+-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
+- rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+- regress_test_bypassrls | f | t | f | f | f | f | t | -1 | |
++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
++ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | regexp_replace | rolvaliduntil
++------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+---------------------------------------------------+---------------
++ regress_test_bypassrls | f | t | f | f | f | f | t | -1 | SCRAM-SHA-256$4096:$: |
+ (1 row)
+
+ -- clean up roles
+diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
+index 97ca9bf72c..b2a7a6f710 100644
+--- a/src/test/regress/expected/rowsecurity.out
++++ b/src/test/regress/expected/rowsecurity.out
+@@ -14,13 +14,13 @@ DROP ROLE IF EXISTS regress_rls_group2;
+ DROP SCHEMA IF EXISTS regress_rls_schema CASCADE;
+ RESET client_min_messages;
+ -- initial setup
+-CREATE USER regress_rls_alice NOLOGIN;
+-CREATE USER regress_rls_bob NOLOGIN;
+-CREATE USER regress_rls_carol NOLOGIN;
+-CREATE USER regress_rls_dave NOLOGIN;
+-CREATE USER regress_rls_exempt_user BYPASSRLS NOLOGIN;
+-CREATE ROLE regress_rls_group1 NOLOGIN;
+-CREATE ROLE regress_rls_group2 NOLOGIN;
++CREATE USER regress_rls_alice NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE USER regress_rls_bob NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE USER regress_rls_carol NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE USER regress_rls_dave NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE USER regress_rls_exempt_user BYPASSRLS NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE ROLE regress_rls_group1 NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE ROLE regress_rls_group2 NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ GRANT regress_rls_group1 TO regress_rls_bob;
+ GRANT regress_rls_group2 TO regress_rls_carol;
+ CREATE SCHEMA regress_rls_schema;
+@@ -4352,8 +4352,8 @@ SELECT count(*) = 0 FROM pg_depend
+
+ -- DROP OWNED BY testing
+ RESET SESSION AUTHORIZATION;
+-CREATE ROLE regress_rls_dob_role1;
+-CREATE ROLE regress_rls_dob_role2;
++CREATE ROLE regress_rls_dob_role1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE ROLE regress_rls_dob_role2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ CREATE TABLE dob_t1 (c1 int);
+ CREATE TABLE dob_t2 (c1 int) PARTITION BY RANGE (c1);
+ CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1 USING (true);
+diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
+index 09a255649b..15895f0c53 100644
+--- a/src/test/regress/expected/rules.out
++++ b/src/test/regress/expected/rules.out
+@@ -3708,7 +3708,7 @@ DROP TABLE ruletest2;
+ -- Test non-SELECT rule on security invoker view.
+ -- Should use view owner's permissions.
+ --
+-CREATE USER regress_rule_user1;
++CREATE USER regress_rule_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ CREATE TABLE ruletest_t1 (x int);
+ CREATE TABLE ruletest_t2 (x int);
+ CREATE VIEW ruletest_v1 WITH (security_invoker=true) AS
+diff --git a/src/test/regress/expected/security_label.out b/src/test/regress/expected/security_label.out
+index a8e01a6220..5a9cef4ede 100644
+--- a/src/test/regress/expected/security_label.out
++++ b/src/test/regress/expected/security_label.out
+@@ -6,8 +6,8 @@ SET client_min_messages TO 'warning';
+ DROP ROLE IF EXISTS regress_seclabel_user1;
+ DROP ROLE IF EXISTS regress_seclabel_user2;
+ RESET client_min_messages;
+-CREATE USER regress_seclabel_user1 WITH CREATEROLE;
+-CREATE USER regress_seclabel_user2;
++CREATE USER regress_seclabel_user1 WITH CREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE USER regress_seclabel_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ CREATE TABLE seclabel_tbl1 (a int, b text);
+ CREATE TABLE seclabel_tbl2 (x int, y text);
+ CREATE VIEW seclabel_view1 AS SELECT * FROM seclabel_tbl2;
+@@ -19,21 +19,21 @@ ALTER TABLE seclabel_tbl2 OWNER TO regress_seclabel_user2;
+ -- Test of SECURITY LABEL statement without a plugin
+ --
+ SECURITY LABEL ON TABLE seclabel_tbl1 IS 'classified'; -- fail
+-ERROR: no security label providers have been loaded
++ERROR: must specify provider when multiple security label providers have been loaded
+ SECURITY LABEL FOR 'dummy' ON TABLE seclabel_tbl1 IS 'classified'; -- fail
+ ERROR: security label provider "dummy" is not loaded
+ SECURITY LABEL ON TABLE seclabel_tbl1 IS '...invalid label...'; -- fail
+-ERROR: no security label providers have been loaded
++ERROR: must specify provider when multiple security label providers have been loaded
+ SECURITY LABEL ON TABLE seclabel_tbl3 IS 'unclassified'; -- fail
+-ERROR: no security label providers have been loaded
++ERROR: must specify provider when multiple security label providers have been loaded
+ SECURITY LABEL ON ROLE regress_seclabel_user1 IS 'classified'; -- fail
+-ERROR: no security label providers have been loaded
++ERROR: must specify provider when multiple security label providers have been loaded
+ SECURITY LABEL FOR 'dummy' ON ROLE regress_seclabel_user1 IS 'classified'; -- fail
+ ERROR: security label provider "dummy" is not loaded
+ SECURITY LABEL ON ROLE regress_seclabel_user1 IS '...invalid label...'; -- fail
+-ERROR: no security label providers have been loaded
++ERROR: must specify provider when multiple security label providers have been loaded
+ SECURITY LABEL ON ROLE regress_seclabel_user3 IS 'unclassified'; -- fail
+-ERROR: no security label providers have been loaded
++ERROR: must specify provider when multiple security label providers have been loaded
+ -- clean up objects
+ DROP FUNCTION seclabel_four();
+ DROP DOMAIN seclabel_domain;
+diff --git a/src/test/regress/expected/select_into.out b/src/test/regress/expected/select_into.out
+index b79fe9a1c0..e29fab88ab 100644
+--- a/src/test/regress/expected/select_into.out
++++ b/src/test/regress/expected/select_into.out
+@@ -15,7 +15,7 @@ DROP TABLE sitmp1;
+ -- SELECT INTO and INSERT permission, if owner is not allowed to insert.
+ --
+ CREATE SCHEMA selinto_schema;
+-CREATE USER regress_selinto_user;
++CREATE USER regress_selinto_user PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user
+ REVOKE INSERT ON TABLES FROM regress_selinto_user;
+ GRANT ALL ON SCHEMA selinto_schema TO public;
+diff --git a/src/test/regress/expected/select_views.out b/src/test/regress/expected/select_views.out
+index 1aeed8452b..7d9427d070 100644
+--- a/src/test/regress/expected/select_views.out
++++ b/src/test/regress/expected/select_views.out
+@@ -1250,7 +1250,7 @@ SELECT * FROM toyemp WHERE name = 'sharon';
+ --
+ -- Test for Leaky view scenario
+ --
+-CREATE ROLE regress_alice;
++CREATE ROLE regress_alice PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ CREATE FUNCTION f_leak (text)
+ RETURNS bool LANGUAGE 'plpgsql' COST 0.0000001
+ AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END';
+diff --git a/src/test/regress/expected/sequence.out b/src/test/regress/expected/sequence.out
+index f02f020542..c9e0fda350 100644
+--- a/src/test/regress/expected/sequence.out
++++ b/src/test/regress/expected/sequence.out
+@@ -22,7 +22,7 @@ CREATE SEQUENCE sequence_testx OWNED BY pg_class_oid_index.oid; -- not a table
+ ERROR: sequence cannot be owned by relation "pg_class_oid_index"
+ DETAIL: This operation is not supported for indexes.
+ CREATE SEQUENCE sequence_testx OWNED BY pg_class.relname; -- not same schema
+-ERROR: sequence must be in same schema as table it is linked to
++ERROR: sequence must have same owner as table it is linked to
+ CREATE TABLE sequence_test_table (a int);
+ CREATE SEQUENCE sequence_testx OWNED BY sequence_test_table.b; -- wrong column
+ ERROR: column "b" of relation "sequence_test_table" does not exist
+@@ -639,7 +639,7 @@ SELECT setval('sequence_test2', 1); -- error
+ ERROR: cannot execute setval() in a read-only transaction
+ ROLLBACK;
+ -- privileges tests
+-CREATE USER regress_seq_user;
++CREATE USER regress_seq_user PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ -- nextval
+ BEGIN;
+ SET LOCAL SESSION AUTHORIZATION regress_seq_user;
+diff --git a/src/test/regress/expected/stats.out b/src/test/regress/expected/stats.out
+index 94187e59cf..72346e2c71 100644
+--- a/src/test/regress/expected/stats.out
++++ b/src/test/regress/expected/stats.out
+@@ -1283,37 +1283,6 @@ SELECT current_setting('fsync') = 'off'
+ t
+ (1 row)
+
+--- Change the tablespace so that the table is rewritten directly, then SELECT
+--- from it to cause it to be read back into shared buffers.
+-SELECT sum(reads) AS io_sum_shared_before_reads
+- FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset
+--- Do this in a transaction to prevent spurious failures due to concurrent accesses to our newly
+--- rewritten table, e.g. by autovacuum.
+-BEGIN;
+-ALTER TABLE test_io_shared SET TABLESPACE regress_tblspace;
+--- SELECT from the table so that the data is read into shared buffers and
+--- context 'normal', object 'relation' reads are counted.
+-SELECT COUNT(*) FROM test_io_shared;
+- count
+--------
+- 100
+-(1 row)
+-
+-COMMIT;
+-SELECT pg_stat_force_next_flush();
+- pg_stat_force_next_flush
+---------------------------
+-
+-(1 row)
+-
+-SELECT sum(reads) AS io_sum_shared_after_reads
+- FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset
+-SELECT :io_sum_shared_after_reads > :io_sum_shared_before_reads;
+- ?column?
+-----------
+- t
+-(1 row)
+-
+ SELECT sum(hits) AS io_sum_shared_before_hits
+ FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset
+ -- Select from the table again to count hits.
+@@ -1415,6 +1384,7 @@ SELECT :io_sum_local_after_evictions > :io_sum_local_before_evictions,
+ -- local buffers, exercising a different codepath than standard local buffer
+ -- writes.
+ ALTER TABLE test_io_local SET TABLESPACE regress_tblspace;
++ERROR: tablespace "regress_tblspace" does not exist
+ SELECT pg_stat_force_next_flush();
+ pg_stat_force_next_flush
+ --------------------------
+
+ (1 row)
+
+@@ -1426,7 +1396,7 @@ SELECT sum(writes) AS io_sum_local_new_tblspc_writes
+ SELECT :io_sum_local_new_tblspc_writes > :io_sum_local_after_writes;
+ ?column?
+ ----------
+- t
++ f
+ (1 row)
+
+ RESET temp_buffers;
+diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out
+index b4c85613de..d32a9a69ad 100644
+--- a/src/test/regress/expected/stats_ext.out
++++ b/src/test/regress/expected/stats_ext.out
+@@ -70,7 +70,7 @@ DROP TABLE ext_stats_test;
+ CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER);
+ CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1;
+ COMMENT ON STATISTICS ab1_a_b_stats IS 'new comment';
+-CREATE ROLE regress_stats_ext;
++CREATE ROLE regress_stats_ext PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ SET SESSION AUTHORIZATION regress_stats_ext;
+ COMMENT ON STATISTICS ab1_a_b_stats IS 'changed comment';
+ ERROR: must be owner of statistics object ab1_a_b_stats
+@@ -3214,7 +3214,7 @@ set search_path to public, stts_s1;
+ stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined
+ (10 rows)
+
+-create role regress_stats_ext nosuperuser;
++create role regress_stats_ext nosuperuser PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ set role regress_stats_ext;
+ \dX
+ List of extended statistics
+@@ -3237,7 +3237,7 @@ drop schema stts_s1, stts_s2 cascade;
+ drop user regress_stats_ext;
+ reset search_path;
+ -- User with no access
+-CREATE USER regress_stats_user1;
++CREATE USER regress_stats_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ GRANT USAGE ON SCHEMA tststats TO regress_stats_user1;
+ SET SESSION AUTHORIZATION regress_stats_user1;
+ SELECT * FROM tststats.priv_test_tbl; -- Permission denied
+diff --git a/src/test/regress/expected/subscription.out b/src/test/regress/expected/subscription.out
+index b15eddbff3..e9ba4568eb 100644
+--- a/src/test/regress/expected/subscription.out
++++ b/src/test/regress/expected/subscription.out
+@@ -1,10 +1,10 @@
+ --
+ -- SUBSCRIPTION
+ --
+-CREATE ROLE regress_subscription_user LOGIN SUPERUSER;
+-CREATE ROLE regress_subscription_user2;
+-CREATE ROLE regress_subscription_user3 IN ROLE pg_create_subscription;
+-CREATE ROLE regress_subscription_user_dummy LOGIN NOSUPERUSER;
++CREATE ROLE regress_subscription_user LOGIN SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE ROLE regress_subscription_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE ROLE regress_subscription_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_create_subscription;
++CREATE ROLE regress_subscription_user_dummy LOGIN NOSUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ SET SESSION AUTHORIZATION 'regress_subscription_user';
+ -- fail - no publications
+ CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo';
+diff --git a/src/test/regress/expected/test_setup.out b/src/test/regress/expected/test_setup.out
+index 5d9e6bf12b..c5fddfdca6 100644
+--- a/src/test/regress/expected/test_setup.out
++++ b/src/test/regress/expected/test_setup.out
+@@ -21,6 +21,7 @@ GRANT ALL ON SCHEMA public TO public;
+ -- Create a tablespace we can use in tests.
+ SET allow_in_place_tablespaces = true;
+ CREATE TABLESPACE regress_tblspace LOCATION '';
++ERROR: CREATE TABLESPACE is not supported on Neon
+ --
+ -- These tables have traditionally been referenced by many tests,
+ -- so create and populate them. Insert only non-error values here.
+@@ -111,7 +112,8 @@ CREATE TABLE onek (
+ string4 name
+ );
+ \set filename :abs_srcdir '/data/onek.data'
+-COPY onek FROM :'filename';
++\set command '\\copy onek FROM ' :'filename';
++:command
+ VACUUM ANALYZE onek;
+ CREATE TABLE onek2 AS SELECT * FROM onek;
+ VACUUM ANALYZE onek2;
+@@ -134,7 +136,8 @@ CREATE TABLE tenk1 (
+ string4 name
+ );
+ \set filename :abs_srcdir '/data/tenk.data'
+-COPY tenk1 FROM :'filename';
++\set command '\\copy tenk1 FROM ' :'filename';
++:command
+ VACUUM ANALYZE tenk1;
+ CREATE TABLE tenk2 AS SELECT * FROM tenk1;
+ VACUUM ANALYZE tenk2;
+@@ -144,20 +147,23 @@ CREATE TABLE person (
+ location point
+ );
+ \set filename :abs_srcdir '/data/person.data'
+-COPY person FROM :'filename';
++\set command '\\copy person FROM ' :'filename';
++:command
+ VACUUM ANALYZE person;
+ CREATE TABLE emp (
+ salary int4,
+ manager name
+ ) INHERITS (person);
+ \set filename :abs_srcdir '/data/emp.data'
+-COPY emp FROM :'filename';
++\set command '\\copy emp FROM ' :'filename';
++:command
+ VACUUM ANALYZE emp;
+ CREATE TABLE student (
+ gpa float8
+ ) INHERITS (person);
+ \set filename :abs_srcdir '/data/student.data'
+-COPY student FROM :'filename';
++\set command '\\copy student FROM ' :'filename';
++:command
+ VACUUM ANALYZE student;
+ CREATE TABLE stud_emp (
+ percent int4
+@@ -166,14 +172,16 @@ NOTICE: merging multiple inherited definitions of column "name"
+ NOTICE: merging multiple inherited definitions of column "age"
+ NOTICE: merging multiple inherited definitions of column "location"
+ \set filename :abs_srcdir '/data/stud_emp.data'
+-COPY stud_emp FROM :'filename';
++\set command '\\copy stud_emp FROM ' :'filename';
++:command
+ VACUUM ANALYZE stud_emp;
+ CREATE TABLE road (
+ name text,
+ thepath path
+ );
+ \set filename :abs_srcdir '/data/streets.data'
+-COPY road FROM :'filename';
++\set command '\\copy road FROM ' :'filename';
++:command
+ VACUUM ANALYZE road;
+ CREATE TABLE ihighway () INHERITS (road);
+ INSERT INTO ihighway
+diff --git a/src/test/regress/expected/tsearch.out b/src/test/regress/expected/tsearch.out
+index 9fad6c8b04..a1b8e82389 100644
+--- a/src/test/regress/expected/tsearch.out
++++ b/src/test/regress/expected/tsearch.out
+@@ -63,7 +63,8 @@ CREATE TABLE test_tsvector(
+ a tsvector
+ );
+ \set filename :abs_srcdir '/data/tsearch.data'
+-COPY test_tsvector FROM :'filename';
++\set command '\\copy test_tsvector FROM ' :'filename';
++:command
+ ANALYZE test_tsvector;
+ -- test basic text search behavior without indexes, then with
+ SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
+diff --git a/src/test/regress/expected/updatable_views.out b/src/test/regress/expected/updatable_views.out
+index ba46c32029..eac3017bac 100644
+--- a/src/test/regress/expected/updatable_views.out
++++ b/src/test/regress/expected/updatable_views.out
+@@ -999,9 +999,9 @@ NOTICE: drop cascades to 2 other objects
+ DETAIL: drop cascades to view rw_view1
+ drop cascades to function rw_view1_aa(rw_view1)
+ -- permissions checks
+-CREATE USER regress_view_user1;
+-CREATE USER regress_view_user2;
+-CREATE USER regress_view_user3;
++CREATE USER regress_view_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE USER regress_view_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE USER regress_view_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ SET SESSION AUTHORIZATION regress_view_user1;
+ CREATE TABLE base_tbl(a int, b text, c float);
+ INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
+@@ -3094,8 +3094,8 @@ DETAIL: View columns that are not columns of their base relation are not updata
+ drop view uv_iocu_view;
+ drop table uv_iocu_tab;
+ -- ON CONFLICT DO UPDATE permissions checks
+-create user regress_view_user1;
+-create user regress_view_user2;
++create user regress_view_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++create user regress_view_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ set session authorization regress_view_user1;
+ create table base_tbl(a int unique, b text, c float);
+ insert into base_tbl values (1,'xxx',1.0);
+diff --git a/src/test/regress/expected/update.out b/src/test/regress/expected/update.out
+index c809f88f54..d1d57852d4 100644
+--- a/src/test/regress/expected/update.out
++++ b/src/test/regress/expected/update.out
+@@ -602,7 +602,7 @@ DROP FUNCTION func_parted_mod_b();
+ -- RLS policies with update-row-movement
+ -----------------------------------------
+ ALTER TABLE range_parted ENABLE ROW LEVEL SECURITY;
+-CREATE USER regress_range_parted_user;
++CREATE USER regress_range_parted_user PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ GRANT ALL ON range_parted, mintab TO regress_range_parted_user;
+ CREATE POLICY seeall ON range_parted AS PERMISSIVE FOR SELECT USING (true);
+ CREATE POLICY policy_range_parted ON range_parted for UPDATE USING (true) WITH CHECK (c % 2 = 0);
+diff --git a/src/test/regress/expected/vacuum.out b/src/test/regress/expected/vacuum.out
+index 4aaf4f025d..40a339758a 100644
+--- a/src/test/regress/expected/vacuum.out
++++ b/src/test/regress/expected/vacuum.out
+@@ -433,7 +433,7 @@ CREATE TABLE vacowned (a int);
+ CREATE TABLE vacowned_parted (a int) PARTITION BY LIST (a);
+ CREATE TABLE vacowned_part1 PARTITION OF vacowned_parted FOR VALUES IN (1);
+ CREATE TABLE vacowned_part2 PARTITION OF vacowned_parted FOR VALUES IN (2);
+-CREATE ROLE regress_vacuum;
++CREATE ROLE regress_vacuum PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ SET ROLE regress_vacuum;
+ -- Simple table
+ VACUUM vacowned;
+diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
+index 3d14bf4e4f..87f351b1d1 100644
+--- a/src/test/regress/parallel_schedule
++++ b/src/test/regress/parallel_schedule
+@@ -130,4 +130,4 @@ test: fast_default
+
+ # run tablespace test at the end because it drops the tablespace created during
+ # setup that other tests may use.
+-test: tablespace
++#test: tablespace
+diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
+index f51726e8ed..8854104eff 100644
+--- a/src/test/regress/sql/aggregates.sql
++++ b/src/test/regress/sql/aggregates.sql
+@@ -15,7 +15,8 @@ CREATE TABLE aggtest (
+ );
+
+ \set filename :abs_srcdir '/data/agg.data'
+-COPY aggtest FROM :'filename';
++\set command '\\copy aggtest FROM ' :'filename';
++:command
+
+ ANALYZE aggtest;
+
+diff --git a/src/test/regress/sql/alter_generic.sql b/src/test/regress/sql/alter_generic.sql
+index de58d268d3..9d38df7f42 100644
+--- a/src/test/regress/sql/alter_generic.sql
++++ b/src/test/regress/sql/alter_generic.sql
+@@ -22,9 +22,9 @@ DROP ROLE IF EXISTS regress_alter_generic_user3;
+
+ RESET client_min_messages;
+
+-CREATE USER regress_alter_generic_user3;
+-CREATE USER regress_alter_generic_user2;
+-CREATE USER regress_alter_generic_user1 IN ROLE regress_alter_generic_user3;
++CREATE USER regress_alter_generic_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE USER regress_alter_generic_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE USER regress_alter_generic_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE regress_alter_generic_user3;
+
+ CREATE SCHEMA alt_nsp1;
+ CREATE SCHEMA alt_nsp2;
+@@ -316,7 +316,7 @@ DROP OPERATOR FAMILY alt_opf4 USING btree;
+
+ -- Should fail. Need to be SUPERUSER to do ALTER OPERATOR FAMILY .. ADD / DROP
+ BEGIN TRANSACTION;
+-CREATE ROLE regress_alter_generic_user5 NOSUPERUSER;
++CREATE ROLE regress_alter_generic_user5 PASSWORD NEON_PASSWORD_PLACEHOLDER NOSUPERUSER;
+ CREATE OPERATOR FAMILY alt_opf5 USING btree;
+ SET ROLE regress_alter_generic_user5;
+ ALTER OPERATOR FAMILY alt_opf5 USING btree ADD OPERATOR 1 < (int4, int2), FUNCTION 1 btint42cmp(int4, int2);
+@@ -326,7 +326,7 @@ ROLLBACK;
+
+ -- Should fail. Need rights to namespace for ALTER OPERATOR FAMILY .. ADD / DROP
+ BEGIN TRANSACTION;
+-CREATE ROLE regress_alter_generic_user6;
++CREATE ROLE regress_alter_generic_user6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ CREATE SCHEMA alt_nsp6;
+ REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_generic_user6;
+ CREATE OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree;
+diff --git a/src/test/regress/sql/alter_operator.sql b/src/test/regress/sql/alter_operator.sql
+index fd40370165..ca8055e06d 100644
+--- a/src/test/regress/sql/alter_operator.sql
++++ b/src/test/regress/sql/alter_operator.sql
+@@ -87,7 +87,7 @@ ALTER OPERATOR & (bit, bit) SET ("Restrict" = _int_contsel, "Join" = _int_contjo
+ --
+ -- Test permission check. Must be owner to ALTER OPERATOR.
+ --
+-CREATE USER regress_alter_op_user;
++CREATE USER regress_alter_op_user PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ SET SESSION AUTHORIZATION regress_alter_op_user;
+
+ ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE);
+diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
+index d2845abc97..a0719b8d0e 100644
+--- a/src/test/regress/sql/alter_table.sql
++++ b/src/test/regress/sql/alter_table.sql
+@@ -7,7 +7,7 @@ SET client_min_messages TO 'warning';
+ DROP ROLE IF EXISTS regress_alter_table_user1;
+ RESET client_min_messages;
+
+-CREATE USER regress_alter_table_user1;
++CREATE USER regress_alter_table_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+
+ --
+ -- add attribute
+@@ -2397,8 +2397,8 @@ DROP TABLE fail_part;
+ ALTER TABLE list_parted ATTACH PARTITION nonexistent FOR VALUES IN (1);
+
+ -- check ownership of the source table
+-CREATE ROLE regress_test_me;
+-CREATE ROLE regress_test_not_me;
++CREATE ROLE regress_test_me PASSWORD NEON_PASSWORD_PLACEHOLDER;
++CREATE ROLE regress_test_not_me PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ CREATE TABLE not_owned_by_me (LIKE list_parted);
+ ALTER TABLE not_owned_by_me OWNER TO regress_test_not_me;
+ SET SESSION AUTHORIZATION regress_test_me;
+diff --git a/src/test/regress/sql/arrays.sql b/src/test/regress/sql/arrays.sql
+index e414fa560d..79a75a0e57 100644
+--- a/src/test/regress/sql/arrays.sql
++++ b/src/test/regress/sql/arrays.sql
+@@ -22,7 +22,8 @@ CREATE TABLE array_op_test (
+ );
+
+ \set filename :abs_srcdir '/data/array.data'
+-COPY array_op_test FROM :'filename';
++\set command '\\copy array_op_test FROM ' :'filename';
++:command
+ ANALYZE array_op_test;
+
+ --
+diff --git a/src/test/regress/sql/btree_index.sql b/src/test/regress/sql/btree_index.sql
+index 239f4a4755..f29d87bdff 100644
+--- a/src/test/regress/sql/btree_index.sql
++++ b/src/test/regress/sql/btree_index.sql
+@@ -26,16 +26,20 @@ CREATE TABLE bt_f8_heap (
+ );
+
+ \set filename :abs_srcdir '/data/desc.data'
+-COPY bt_i4_heap FROM :'filename';
++\set command '\\copy bt_i4_heap FROM ' :'filename';
++:command
+
+ \set filename :abs_srcdir '/data/hash.data'
+-COPY bt_name_heap FROM :'filename';
++\set command '\\copy bt_name_heap FROM ' :'filename';
++:command
+
+ \set filename :abs_srcdir '/data/desc.data'
+-COPY bt_txt_heap FROM :'filename';
++\set command '\\copy bt_txt_heap FROM ' :'filename';
++:command
+
+ \set filename :abs_srcdir '/data/hash.data'
+-COPY bt_f8_heap FROM :'filename';
++\set command '\\copy bt_f8_heap FROM ' :'filename';
++:command
+
+ ANALYZE bt_i4_heap;
+ ANALYZE bt_name_heap;
+diff --git a/src/test/regress/sql/cluster.sql b/src/test/regress/sql/cluster.sql
+index 6cb9c926c0..5e689e4062 100644
+--- a/src/test/regress/sql/cluster.sql
++++ b/src/test/regress/sql/cluster.sql
+@@ -108,7 +108,7 @@ WHERE pg_class.oid=indexrelid
+ CLUSTER pg_toast.pg_toast_826 USING pg_toast_826_index;
+
+ -- Verify that clustering all tables does in fact cluster the right ones
+-CREATE USER regress_clstr_user;
++CREATE USER regress_clstr_user PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ CREATE TABLE clstr_1 (a INT PRIMARY KEY);
+ CREATE TABLE clstr_2 (a INT PRIMARY KEY);
+ CREATE TABLE clstr_3 (a INT PRIMARY KEY);
+@@ -233,7 +233,7 @@ DROP TABLE clstrpart;
+ CREATE TABLE ptnowner(i int unique) PARTITION BY LIST (i);
+ CREATE INDEX ptnowner_i_idx ON ptnowner(i);
+ CREATE TABLE ptnowner1 PARTITION OF ptnowner FOR VALUES IN (1);
+-CREATE ROLE regress_ptnowner;
++CREATE ROLE regress_ptnowner PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ CREATE TABLE ptnowner2 PARTITION OF ptnowner FOR VALUES IN (2);
+ ALTER TABLE ptnowner1 OWNER TO regress_ptnowner;
+ ALTER TABLE ptnowner OWNER TO regress_ptnowner;
+diff --git a/src/test/regress/sql/collate.icu.utf8.sql b/src/test/regress/sql/collate.icu.utf8.sql
+index 3db9e25913..c66d5aa2c2 100644
+--- a/src/test/regress/sql/collate.icu.utf8.sql
++++ b/src/test/regress/sql/collate.icu.utf8.sql
+@@ -353,7 +353,7 @@ reset enable_seqscan;
+
+ -- schema manipulation commands
+
+-CREATE ROLE regress_test_role;
++CREATE ROLE regress_test_role PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ CREATE SCHEMA test_schema;
+
+ -- We need to do this this way to cope with varying names for encodings:
+diff --git a/src/test/regress/sql/constraints.sql b/src/test/regress/sql/constraints.sql
+index e3e3bea709..fa86ddc326 100644
+--- a/src/test/regress/sql/constraints.sql
++++ b/src/test/regress/sql/constraints.sql
+@@ -243,12 +243,14 @@ CREATE TABLE COPY_TBL (x INT, y TEXT, z INT,
+ CHECK (x > 3 AND y <> 'check failed' AND x < 7 ));
+
+ \set filename :abs_srcdir '/data/constro.data'
+-COPY COPY_TBL FROM :'filename';
++\set command '\\copy COPY_TBL FROM ' :'filename';
++:command
+
+ SELECT * FROM COPY_TBL;
+
+ \set filename :abs_srcdir '/data/constrf.data'
+-COPY COPY_TBL FROM :'filename';
++\set command '\\copy COPY_TBL FROM ' :'filename';
++:command
+
+ SELECT * FROM COPY_TBL;
+
+@@ -599,7 +601,7 @@ DROP TABLE deferred_excl;
+
+ -- Comments
+ -- Setup a low-level role to enforce non-superuser checks.
+-CREATE ROLE regress_constraint_comments;
++CREATE ROLE regress_constraint_comments PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ SET SESSION AUTHORIZATION regress_constraint_comments;
+
+ CREATE TABLE constraint_comments_tbl (a int CONSTRAINT the_constraint CHECK (a > 0));
+@@ -621,7 +623,7 @@ COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS NULL;
+
+ -- unauthorized user
+ RESET SESSION AUTHORIZATION;
+-CREATE ROLE regress_constraint_comments_noaccess;
++CREATE ROLE regress_constraint_comments_noaccess PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ SET SESSION AUTHORIZATION regress_constraint_comments_noaccess;
+ COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'no, the comment';
+ COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'no, another comment';
+diff --git a/src/test/regress/sql/conversion.sql b/src/test/regress/sql/conversion.sql
+index 9a65fca91f..58431a3056 100644
+--- a/src/test/regress/sql/conversion.sql
++++ b/src/test/regress/sql/conversion.sql
+@@ -12,7 +12,7 @@ CREATE FUNCTION test_enc_conversion(bytea, name, name, bool, validlen OUT int, r
+ AS :'regresslib', 'test_enc_conversion'
+ LANGUAGE C STRICT;
+
+-CREATE USER regress_conversion_user WITH NOCREATEDB NOCREATEROLE;
++CREATE USER regress_conversion_user WITH NOCREATEDB NOCREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ SET SESSION AUTHORIZATION regress_conversion_user;
+ CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
+ --
+diff --git a/src/test/regress/sql/copy.sql b/src/test/regress/sql/copy.sql
+index 43d2e906dd..6c993d70f0 100644
+--- a/src/test/regress/sql/copy.sql
++++ b/src/test/regress/sql/copy.sql
+@@ -20,11 +20,13 @@ insert into copytest values('Mac',E'abc\rdef',3);
+ insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4);
+
+ \set filename :abs_builddir '/results/copytest.csv'
+-copy copytest to :'filename' csv;
++\set command '\\copy copytest to ' :'filename' csv;
++:command
+
+ create temp table copytest2 (like copytest);
+
+-copy copytest2 from :'filename' csv;
++\set command '\\copy copytest2 from ' :'filename' csv;
++:command
+
+ select * from copytest except select * from copytest2;
+
+@@ -32,9 +34,11 @@ truncate copytest2;
+
+ --- same test but with an escape char different from quote char
+
+-copy copytest to :'filename' csv quote '''' escape E'\\';
++\set command '\\copy copytest to ' :'filename' ' csv quote ' '\'\'\'\'' ' escape ' 'E\'' '\\\\\'';
++:command
+
+-copy copytest2 from :'filename' csv quote '''' escape E'\\';
++\set command '\\copy copytest2 from ' :'filename' ' csv quote ' '\'\'\'\'' ' escape ' 'E\'' '\\\\\'';
++:command
+
+ select * from copytest except select * from copytest2;
+
+@@ -86,16 +90,19 @@ insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x;
+ insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x;
+
+ \set filename :abs_builddir '/results/parted_copytest.csv'
+-copy (select * from parted_copytest order by a) to :'filename';
++\set command '\\copy (select * from parted_copytest order by a) to ' :'filename';
++:command
+
+ truncate parted_copytest;
+
+-copy parted_copytest from :'filename';
++\set command '\\copy parted_copytest from ' :'filename';
++:command
+
+ -- Ensure COPY FREEZE errors for partitioned tables.
+ begin;
+ truncate parted_copytest;
+-copy parted_copytest from :'filename' (freeze);
++\set command '\\copy parted_copytest from ' :'filename' (freeze);
++:command
+ rollback;
+
+ select tableoid::regclass,count(*),sum(a) from parted_copytest
+@@ -115,7 +122,8 @@ create trigger part_ins_trig
+ for each row
+ execute procedure part_ins_func();
+
+-copy parted_copytest from :'filename';
++\set command '\\copy parted_copytest from ' :'filename';
++:command
+
+ select tableoid::regclass,count(*),sum(a) from parted_copytest
+ group by tableoid order by tableoid::regclass::name;
+@@ -124,7 +132,8 @@ truncate table parted_copytest;
+ create index on parted_copytest (b);
+ drop trigger part_ins_trig on parted_copytest_a2;
+
+-copy parted_copytest from stdin;
++\set command '\\copy parted_copytest from ' stdin;
++:command
+ 1 1 str1
+ 2 2 str2
+ \.
+@@ -191,8 +200,8 @@ bill 20 (11,10) 1000 sharon
+ -- Generate COPY FROM report with FILE, with some excluded tuples.
+ truncate tab_progress_reporting;
+ \set filename :abs_srcdir '/data/emp.data'
+-copy tab_progress_reporting from :'filename'
+- where (salary < 2000);
++\set command '\\copy tab_progress_reporting from ' :'filename' 'where (salary < 2000)';
++:command
+
+ drop trigger check_after_tab_progress_reporting on tab_progress_reporting;
+ drop function notice_after_tab_progress_reporting();
+@@ -311,7 +320,8 @@ CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1);
+ -- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org
+ -- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us
+ \set filename :abs_srcdir '/data/desc.data'
+-COPY parted_si(id, data) FROM :'filename';
++\set command '\\COPY parted_si(id, data) FROM ' :'filename';
++:command
+
+ -- An earlier bug (see commit b1ecb9b3fcf) could end up using a buffer from
+ -- the wrong partition. This test is *not* guaranteed to trigger that bug, but
+diff --git a/src/test/regress/sql/copy2.sql b/src/test/regress/sql/copy2.sql
+index d759635068..d58e50dcc5 100644
+--- a/src/test/regress/sql/copy2.sql
++++ b/src/test/regress/sql/copy2.sql
+@@ -365,8 +365,8 @@ copy check_con_tbl from stdin;
+ select * from check_con_tbl;
+
+ -- test with RLS enabled.
+-CREATE ROLE regress_rls_copy_user; +-CREATE ROLE regress_rls_copy_user_colperms; ++CREATE ROLE regress_rls_copy_user PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_rls_copy_user_colperms PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE rls_t1 (a int, b int, c int); + + COPY rls_t1 (a, b, c) from stdin; +diff --git a/src/test/regress/sql/create_function_sql.sql b/src/test/regress/sql/create_function_sql.sql +index 89e9af3a49..2b86fe2285 100644 +--- a/src/test/regress/sql/create_function_sql.sql ++++ b/src/test/regress/sql/create_function_sql.sql +@@ -6,7 +6,7 @@ + + -- All objects made in this test are in temp_func_test schema + +-CREATE USER regress_unpriv_user; ++CREATE USER regress_unpriv_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + + CREATE SCHEMA temp_func_test; + GRANT ALL ON SCHEMA temp_func_test TO public; +diff --git a/src/test/regress/sql/create_index.sql b/src/test/regress/sql/create_index.sql +index d49ce9f300..47fa813bc8 100644 +--- a/src/test/regress/sql/create_index.sql ++++ b/src/test/regress/sql/create_index.sql +@@ -71,7 +71,8 @@ CREATE TABLE fast_emp4000 ( + ); + + \set filename :abs_srcdir '/data/rect.data' +-COPY slow_emp4000 FROM :'filename'; ++\set command '\\copy slow_emp4000 FROM ' :'filename'; ++:command + + INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000; + +@@ -269,7 +270,8 @@ CREATE TABLE array_index_op_test ( + ); + + \set filename :abs_srcdir '/data/array.data' +-COPY array_index_op_test FROM :'filename'; ++\set command '\\copy array_index_op_test FROM ' :'filename'; ++:command + ANALYZE array_index_op_test; + + SELECT * FROM array_index_op_test WHERE i = '{NULL}' ORDER BY seqno; +@@ -1246,7 +1248,7 @@ END; + REINDEX SCHEMA CONCURRENTLY schema_to_reindex; + + -- Failure for unauthorized user +-CREATE ROLE regress_reindexuser NOLOGIN; ++CREATE ROLE regress_reindexuser NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION ROLE regress_reindexuser; + REINDEX SCHEMA schema_to_reindex; + -- Permission failures with toast tables and indexes (pg_authid here) +diff --git a/src/test/regress/sql/create_procedure.sql b/src/test/regress/sql/create_procedure.sql +index 069a3727ce..faeeb3f744 100644 +--- a/src/test/regress/sql/create_procedure.sql ++++ b/src/test/regress/sql/create_procedure.sql +@@ -255,7 +255,7 @@ DROP PROCEDURE nonexistent(); + + -- privileges + +-CREATE USER regress_cp_user1; ++CREATE USER regress_cp_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT INSERT ON cp_test TO regress_cp_user1; + REVOKE EXECUTE ON PROCEDURE ptest1(text) FROM PUBLIC; + SET ROLE regress_cp_user1; +diff --git a/src/test/regress/sql/create_role.sql b/src/test/regress/sql/create_role.sql +index 4491a28a8a..3045434865 100644 +--- a/src/test/regress/sql/create_role.sql ++++ b/src/test/regress/sql/create_role.sql +@@ -1,20 +1,20 @@ + -- ok, superuser can create users with any set of privileges +-CREATE ROLE regress_role_super SUPERUSER; +-CREATE ROLE regress_role_admin CREATEDB CREATEROLE REPLICATION BYPASSRLS; ++CREATE ROLE regress_role_super SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_role_admin CREATEDB CREATEROLE REPLICATION BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT CREATE ON DATABASE regression TO regress_role_admin WITH GRANT OPTION; +-CREATE ROLE regress_role_limited_admin CREATEROLE; +-CREATE ROLE regress_role_normal; ++CREATE ROLE regress_role_limited_admin CREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_role_normal PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- fail, CREATEROLE user can't give away 
role attributes without having them + SET SESSION AUTHORIZATION regress_role_limited_admin; +-CREATE ROLE regress_nosuch_superuser SUPERUSER; +-CREATE ROLE regress_nosuch_replication_bypassrls REPLICATION BYPASSRLS; +-CREATE ROLE regress_nosuch_replication REPLICATION; +-CREATE ROLE regress_nosuch_bypassrls BYPASSRLS; +-CREATE ROLE regress_nosuch_createdb CREATEDB; ++CREATE ROLE regress_nosuch_superuser SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_nosuch_replication_bypassrls REPLICATION BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_nosuch_replication REPLICATION PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_nosuch_bypassrls BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_nosuch_createdb CREATEDB PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- ok, can create a role without any special attributes +-CREATE ROLE regress_role_limited; ++CREATE ROLE regress_role_limited PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- fail, can't give it in any of the restricted attributes + ALTER ROLE regress_role_limited SUPERUSER; +@@ -25,10 +25,10 @@ DROP ROLE regress_role_limited; + + -- ok, can give away these role attributes if you have them + SET SESSION AUTHORIZATION regress_role_admin; +-CREATE ROLE regress_replication_bypassrls REPLICATION BYPASSRLS; +-CREATE ROLE regress_replication REPLICATION; +-CREATE ROLE regress_bypassrls BYPASSRLS; +-CREATE ROLE regress_createdb CREATEDB; ++CREATE ROLE regress_replication_bypassrls REPLICATION BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_replication REPLICATION PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_bypassrls BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_createdb CREATEDB PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- ok, can toggle these role attributes off and on if you have them + ALTER ROLE regress_replication NOREPLICATION; +@@ -43,52 +43,52 @@ ALTER ROLE regress_createdb SUPERUSER; + ALTER ROLE regress_createdb NOSUPERUSER; + + -- ok, having CREATEROLE is enough to create users with these privileges +-CREATE ROLE regress_createrole CREATEROLE NOINHERIT; ++CREATE ROLE regress_createrole CREATEROLE NOINHERIT PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT CREATE ON DATABASE regression TO regress_createrole WITH GRANT OPTION; +-CREATE ROLE regress_login LOGIN; +-CREATE ROLE regress_inherit INHERIT; +-CREATE ROLE regress_connection_limit CONNECTION LIMIT 5; +-CREATE ROLE regress_encrypted_password ENCRYPTED PASSWORD 'foo'; +-CREATE ROLE regress_password_null PASSWORD NULL; ++CREATE ROLE regress_login LOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_inherit INHERIT PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_connection_limit CONNECTION LIMIT 5 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_encrypted_password ENCRYPTED PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_password_null PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- ok, backwards compatible noise words should be ignored +-CREATE ROLE regress_noiseword SYSID 12345; ++CREATE ROLE regress_noiseword SYSID 12345 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- fail, cannot grant membership in superuser role +-CREATE ROLE regress_nosuch_super IN ROLE regress_role_super; ++CREATE ROLE regress_nosuch_super IN ROLE regress_role_super PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- fail, database owner cannot have members +-CREATE ROLE regress_nosuch_dbowner IN ROLE pg_database_owner; ++CREATE ROLE regress_nosuch_dbowner IN ROLE pg_database_owner PASSWORD 
NEON_PASSWORD_PLACEHOLDER; + + -- ok, can grant other users into a role + CREATE ROLE regress_inroles ROLE + regress_role_super, regress_createdb, regress_createrole, regress_login, +- regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; ++ regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- fail, cannot grant a role into itself +-CREATE ROLE regress_nosuch_recursive ROLE regress_nosuch_recursive; ++CREATE ROLE regress_nosuch_recursive ROLE regress_nosuch_recursive PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- ok, can grant other users into a role with admin option + CREATE ROLE regress_adminroles ADMIN + regress_role_super, regress_createdb, regress_createrole, regress_login, +- regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; ++ regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- fail, cannot grant a role into itself with admin option +-CREATE ROLE regress_nosuch_admin_recursive ADMIN regress_nosuch_admin_recursive; ++CREATE ROLE regress_nosuch_admin_recursive ADMIN regress_nosuch_admin_recursive PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- fail, regress_createrole does not have CREATEDB privilege + SET SESSION AUTHORIZATION regress_createrole; + CREATE DATABASE regress_nosuch_db; + + -- ok, regress_createrole can create new roles +-CREATE ROLE regress_plainrole; ++CREATE ROLE regress_plainrole PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- ok, roles with CREATEROLE can create new roles with it +-CREATE ROLE regress_rolecreator CREATEROLE; ++CREATE ROLE regress_rolecreator CREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- ok, roles with CREATEROLE can create new roles with different role + -- attributes, including CREATEROLE +-CREATE ROLE regress_hasprivs CREATEROLE LOGIN INHERIT CONNECTION LIMIT 5; ++CREATE ROLE regress_hasprivs CREATEROLE LOGIN INHERIT CONNECTION LIMIT 5 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- ok, we should be able to modify a role we created + COMMENT ON ROLE regress_hasprivs IS 'some comment'; +@@ -123,7 +123,7 @@ REASSIGN OWNED BY regress_tenant TO regress_createrole; + + -- ok, create a role with a value for createrole_self_grant + SET createrole_self_grant = 'set, inherit'; +-CREATE ROLE regress_tenant2; ++CREATE ROLE regress_tenant2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT CREATE ON DATABASE regression TO regress_tenant2; + + -- ok, regress_tenant2 can create objects within the database +@@ -150,16 +150,16 @@ ALTER TABLE tenant2_table OWNER TO regress_tenant2; + DROP TABLE tenant2_table; + + -- fail, CREATEROLE is not enough to create roles in privileged roles +-CREATE ROLE regress_read_all_data IN ROLE pg_read_all_data; +-CREATE ROLE regress_write_all_data IN ROLE pg_write_all_data; +-CREATE ROLE regress_monitor IN ROLE pg_monitor; +-CREATE ROLE regress_read_all_settings IN ROLE pg_read_all_settings; +-CREATE ROLE regress_read_all_stats IN ROLE pg_read_all_stats; +-CREATE ROLE regress_stat_scan_tables IN ROLE pg_stat_scan_tables; +-CREATE ROLE regress_read_server_files IN ROLE pg_read_server_files; +-CREATE ROLE regress_write_server_files IN ROLE pg_write_server_files; +-CREATE ROLE regress_execute_server_program IN ROLE pg_execute_server_program; +-CREATE ROLE regress_signal_backend IN ROLE pg_signal_backend; ++CREATE ROLE regress_read_all_data PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_read_all_data; 
++CREATE ROLE regress_write_all_data PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_write_all_data; ++CREATE ROLE regress_monitor PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_monitor; ++CREATE ROLE regress_read_all_settings PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_read_all_settings; ++CREATE ROLE regress_read_all_stats PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_read_all_stats; ++CREATE ROLE regress_stat_scan_tables PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_stat_scan_tables; ++CREATE ROLE regress_read_server_files PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_read_server_files; ++CREATE ROLE regress_write_server_files PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_write_server_files; ++CREATE ROLE regress_execute_server_program PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_execute_server_program; ++CREATE ROLE regress_signal_backend PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_signal_backend; + + -- fail, role still owns database objects + DROP ROLE regress_tenant; +diff --git a/src/test/regress/sql/create_schema.sql b/src/test/regress/sql/create_schema.sql +index 1b7064247a..be5b662ce1 100644 +--- a/src/test/regress/sql/create_schema.sql ++++ b/src/test/regress/sql/create_schema.sql +@@ -4,7 +4,7 @@ + + -- Schema creation with elements. + +-CREATE ROLE regress_create_schema_role SUPERUSER; ++CREATE ROLE regress_create_schema_role SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- Cases where schema creation fails as objects are qualified with a schema + -- that does not match with what's expected. +diff --git a/src/test/regress/sql/create_view.sql b/src/test/regress/sql/create_view.sql +index 3a78be1b0c..617d2dc8d6 100644 +--- a/src/test/regress/sql/create_view.sql ++++ b/src/test/regress/sql/create_view.sql +@@ -23,7 +23,8 @@ CREATE TABLE real_city ( + ); + + \set filename :abs_srcdir '/data/real_city.data' +-COPY real_city FROM :'filename'; ++\set command '\\copy real_city FROM ' :'filename'; ++:command + ANALYZE real_city; + + SELECT * +diff --git a/src/test/regress/sql/database.sql b/src/test/regress/sql/database.sql +index 0367c0e37a..a23b98c4bd 100644 +--- a/src/test/regress/sql/database.sql ++++ b/src/test/regress/sql/database.sql +@@ -1,8 +1,6 @@ + CREATE DATABASE regression_tbd + ENCODING utf8 LC_COLLATE "C" LC_CTYPE "C" TEMPLATE template0; + ALTER DATABASE regression_tbd RENAME TO regression_utf8; +-ALTER DATABASE regression_utf8 SET TABLESPACE regress_tblspace; +-ALTER DATABASE regression_utf8 RESET TABLESPACE; + ALTER DATABASE regression_utf8 CONNECTION_LIMIT 123; + + -- Test PgDatabaseToastTable. Doing this with GRANT would be slow. 
+diff --git a/src/test/regress/sql/dependency.sql b/src/test/regress/sql/dependency.sql +index 2559c62d0b..06c3aa1a36 100644 +--- a/src/test/regress/sql/dependency.sql ++++ b/src/test/regress/sql/dependency.sql +@@ -2,10 +2,10 @@ + -- DEPENDENCIES + -- + +-CREATE USER regress_dep_user; +-CREATE USER regress_dep_user2; +-CREATE USER regress_dep_user3; +-CREATE GROUP regress_dep_group; ++CREATE USER regress_dep_user PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_dep_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_dep_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE GROUP regress_dep_group PASSWORD NEON_PASSWORD_PLACEHOLDER; + + CREATE TABLE deptest (f1 serial primary key, f2 text); + +@@ -45,9 +45,9 @@ DROP TABLE deptest; + DROP USER regress_dep_user3; + + -- Test DROP OWNED +-CREATE USER regress_dep_user0; +-CREATE USER regress_dep_user1; +-CREATE USER regress_dep_user2; ++CREATE USER regress_dep_user0 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_dep_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_dep_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION regress_dep_user0; + -- permission denied + DROP OWNED BY regress_dep_user1; +diff --git a/src/test/regress/sql/drop_if_exists.sql b/src/test/regress/sql/drop_if_exists.sql +index ac6168b91f..4270062ec7 100644 +--- a/src/test/regress/sql/drop_if_exists.sql ++++ b/src/test/regress/sql/drop_if_exists.sql +@@ -86,9 +86,9 @@ DROP DOMAIN test_domain_exists; + --- role/user/group + --- + +-CREATE USER regress_test_u1; +-CREATE ROLE regress_test_r1; +-CREATE GROUP regress_test_g1; ++CREATE USER regress_test_u1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_test_r1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE GROUP regress_test_g1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + DROP USER regress_test_u2; + +diff --git a/src/test/regress/sql/equivclass.sql b/src/test/regress/sql/equivclass.sql +index 247b0a3105..bf018fd3a1 100644 +--- a/src/test/regress/sql/equivclass.sql ++++ b/src/test/regress/sql/equivclass.sql +@@ -230,7 +230,7 @@ set enable_mergejoin = off; + alter table ec1 enable row level security; + create policy p1 on ec1 using (f1 < '5'::int8alias1); + +-create user regress_user_ectest; ++create user regress_user_ectest PASSWORD NEON_PASSWORD_PLACEHOLDER; + grant select on ec0 to regress_user_ectest; + grant select on ec1 to regress_user_ectest; + +diff --git a/src/test/regress/sql/event_trigger.sql b/src/test/regress/sql/event_trigger.sql +index 1aeaddbe71..89a410ec4a 100644 +--- a/src/test/regress/sql/event_trigger.sql ++++ b/src/test/regress/sql/event_trigger.sql +@@ -86,7 +86,7 @@ create event trigger regress_event_trigger2 on ddl_command_start + comment on event trigger regress_event_trigger is 'test comment'; + + -- drop as non-superuser should fail +-create role regress_evt_user; ++create role regress_evt_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + set role regress_evt_user; + create event trigger regress_event_trigger_noperms on ddl_command_start + execute procedure test_event_trigger(); +diff --git a/src/test/regress/sql/foreign_data.sql b/src/test/regress/sql/foreign_data.sql +index aa147b14a9..370e0dd570 100644 +--- a/src/test/regress/sql/foreign_data.sql ++++ b/src/test/regress/sql/foreign_data.sql +@@ -22,14 +22,14 @@ DROP ROLE IF EXISTS regress_foreign_data_user, regress_test_role, regress_test_r + + RESET client_min_messages; + +-CREATE ROLE regress_foreign_data_user LOGIN SUPERUSER; ++CREATE ROLE regress_foreign_data_user LOGIN SUPERUSER PASSWORD 
NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION 'regress_foreign_data_user'; + +-CREATE ROLE regress_test_role; +-CREATE ROLE regress_test_role2; +-CREATE ROLE regress_test_role_super SUPERUSER; +-CREATE ROLE regress_test_indirect; +-CREATE ROLE regress_unprivileged_role; ++CREATE ROLE regress_test_role PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_test_role2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_test_role_super SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_test_indirect PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_unprivileged_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + + CREATE FOREIGN DATA WRAPPER dummy; + COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless'; +diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql +index 22e177f89b..7138d5e1d4 100644 +--- a/src/test/regress/sql/foreign_key.sql ++++ b/src/test/regress/sql/foreign_key.sql +@@ -1418,7 +1418,7 @@ ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 + -- leave these tables around intentionally + + -- test the case when the referenced table is owned by a different user +-create role regress_other_partitioned_fk_owner; ++create role regress_other_partitioned_fk_owner PASSWORD NEON_PASSWORD_PLACEHOLDER; + grant references on fk_notpartitioned_pk to regress_other_partitioned_fk_owner; + set role regress_other_partitioned_fk_owner; + create table other_partitioned_fk(a int, b int) partition by list (a); +diff --git a/src/test/regress/sql/generated.sql b/src/test/regress/sql/generated.sql +index 298f6b3aa8..f058913ae0 100644 +--- a/src/test/regress/sql/generated.sql ++++ b/src/test/regress/sql/generated.sql +@@ -263,7 +263,7 @@ ALTER TABLE gtest10a DROP COLUMN b; + INSERT INTO gtest10a (a) VALUES (1); + + -- privileges +-CREATE USER regress_user11; ++CREATE USER regress_user11 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + CREATE TABLE gtest11s (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (b * 2) STORED); + INSERT INTO gtest11s VALUES (1, 10), (2, 20); +diff --git a/src/test/regress/sql/guc.sql b/src/test/regress/sql/guc.sql +index dc79761955..a9ead75349 100644 +--- a/src/test/regress/sql/guc.sql ++++ b/src/test/regress/sql/guc.sql +@@ -188,7 +188,7 @@ PREPARE foo AS SELECT 1; + LISTEN foo_event; + SET vacuum_cost_delay = 13; + CREATE TEMP TABLE tmp_foo (data text) ON COMMIT DELETE ROWS; +-CREATE ROLE regress_guc_user; ++CREATE ROLE regress_guc_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION regress_guc_user; + -- look changes + SELECT pg_listening_channels(); +diff --git a/src/test/regress/sql/hash_index.sql b/src/test/regress/sql/hash_index.sql +index 527024f710..de49c0b85f 100644 +--- a/src/test/regress/sql/hash_index.sql ++++ b/src/test/regress/sql/hash_index.sql +@@ -26,10 +26,14 @@ CREATE TABLE hash_f8_heap ( + ); + + \set filename :abs_srcdir '/data/hash.data' +-COPY hash_i4_heap FROM :'filename'; +-COPY hash_name_heap FROM :'filename'; +-COPY hash_txt_heap FROM :'filename'; +-COPY hash_f8_heap FROM :'filename'; ++\set command '\\copy hash_i4_heap FROM ' :'filename'; ++:command ++\set command '\\copy hash_name_heap FROM ' :'filename'; ++:command ++\set command '\\copy hash_txt_heap FROM ' :'filename'; ++:command ++\set command '\\copy hash_f8_heap FROM ' :'filename'; ++:command + + -- the data in this file has a lot of duplicates in the index key + -- fields, leading to long bucket chains and lots of table expansion. 
+diff --git a/src/test/regress/sql/identity.sql b/src/test/regress/sql/identity.sql +index 91d2e443b4..241c93f373 100644 +--- a/src/test/regress/sql/identity.sql ++++ b/src/test/regress/sql/identity.sql +@@ -287,7 +287,7 @@ ALTER TABLE itest7 ALTER COLUMN a RESTART; + ALTER TABLE itest7 ALTER COLUMN a DROP IDENTITY; + + -- privileges +-CREATE USER regress_identity_user1; ++CREATE USER regress_identity_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE itest8 (a int GENERATED ALWAYS AS IDENTITY, b text); + GRANT SELECT, INSERT ON itest8 TO regress_identity_user1; + SET ROLE regress_identity_user1; +diff --git a/src/test/regress/sql/inherit.sql b/src/test/regress/sql/inherit.sql +index fe699c54d5..bdd5993f45 100644 +--- a/src/test/regress/sql/inherit.sql ++++ b/src/test/regress/sql/inherit.sql +@@ -950,7 +950,7 @@ create index on permtest_parent (left(c, 3)); + insert into permtest_parent + select 1, 'a', left(fipshash(i::text), 5) from generate_series(0, 100) i; + analyze permtest_parent; +-create role regress_no_child_access; ++create role regress_no_child_access PASSWORD NEON_PASSWORD_PLACEHOLDER; + revoke all on permtest_grandchild from regress_no_child_access; + grant select on permtest_parent to regress_no_child_access; + set session authorization regress_no_child_access; +diff --git a/src/test/regress/sql/insert.sql b/src/test/regress/sql/insert.sql +index 2b086eeb6d..913d8a0aed 100644 +--- a/src/test/regress/sql/insert.sql ++++ b/src/test/regress/sql/insert.sql +@@ -513,7 +513,7 @@ drop table mlparted5; + create table key_desc (a int, b int) partition by list ((a+0)); + create table key_desc_1 partition of key_desc for values in (1) partition by range (b); + +-create user regress_insert_other_user; ++create user regress_insert_other_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + grant select (a) on key_desc_1 to regress_insert_other_user; + grant insert on key_desc to regress_insert_other_user; + +@@ -597,7 +597,7 @@ insert into brtrigpartcon1 values (1, 'hi there'); + -- check that the message shows the appropriate column description in a + -- situation where the partitioned table is not the primary ModifyTable node + create table inserttest3 (f1 text default 'foo', f2 text default 'bar', f3 int); +-create role regress_coldesc_role; ++create role regress_coldesc_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + grant insert on inserttest3 to regress_coldesc_role; + grant insert on brtrigpartcon to regress_coldesc_role; + revoke select on brtrigpartcon from regress_coldesc_role; +diff --git a/src/test/regress/sql/jsonb.sql b/src/test/regress/sql/jsonb.sql +index 6dae715afd..aa320ba7be 100644 +--- a/src/test/regress/sql/jsonb.sql ++++ b/src/test/regress/sql/jsonb.sql +@@ -6,7 +6,8 @@ CREATE TABLE testjsonb ( + ); + + \set filename :abs_srcdir '/data/jsonb.data' +-COPY testjsonb FROM :'filename'; ++\set command '\\copy testjsonb FROM ' :'filename'; ++:command + + -- Strings. + SELECT '""'::jsonb; -- OK. 
+diff --git a/src/test/regress/sql/largeobject.sql b/src/test/regress/sql/largeobject.sql +index a4aee02e3a..8839c9496a 100644 +--- a/src/test/regress/sql/largeobject.sql ++++ b/src/test/regress/sql/largeobject.sql +@@ -10,7 +10,7 @@ + SET bytea_output TO escape; + + -- Test ALTER LARGE OBJECT OWNER +-CREATE ROLE regress_lo_user; ++CREATE ROLE regress_lo_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + SELECT lo_create(42); + ALTER LARGE OBJECT 42 OWNER TO regress_lo_user; + +@@ -189,7 +189,8 @@ SELECT lo_unlink(loid) from lotest_stash_values; + TRUNCATE lotest_stash_values; + + \set filename :abs_srcdir '/data/tenk.data' +-INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); ++\lo_import :filename ++INSERT INTO lotest_stash_values (loid) VALUES (:LASTOID); + + BEGIN; + UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +@@ -219,8 +220,8 @@ SELECT lo_close(fd) FROM lotest_stash_values; + END; + + \set filename :abs_builddir '/results/lotest.txt' +-SELECT lo_export(loid, :'filename') FROM lotest_stash_values; +- ++SELECT loid FROM lotest_stash_values \gset ++\lo_export :loid, :filename + \lo_import :filename + + \set newloid :LASTOID +diff --git a/src/test/regress/sql/lock.sql b/src/test/regress/sql/lock.sql +index b88488c6d0..78b31e6dd3 100644 +--- a/src/test/regress/sql/lock.sql ++++ b/src/test/regress/sql/lock.sql +@@ -19,7 +19,7 @@ CREATE VIEW lock_view3 AS SELECT * from lock_view2; + CREATE VIEW lock_view4 AS SELECT (select a from lock_tbl1a limit 1) from lock_tbl1; + CREATE VIEW lock_view5 AS SELECT * from lock_tbl1 where a in (select * from lock_tbl1a); + CREATE VIEW lock_view6 AS SELECT * from (select * from lock_tbl1) sub; +-CREATE ROLE regress_rol_lock1; ++CREATE ROLE regress_rol_lock1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + ALTER ROLE regress_rol_lock1 SET search_path = lock_schema1; + GRANT USAGE ON SCHEMA lock_schema1 TO regress_rol_lock1; + +diff --git a/src/test/regress/sql/matview.sql b/src/test/regress/sql/matview.sql +index 235123de1e..58e73cec5d 100644 +--- a/src/test/regress/sql/matview.sql ++++ b/src/test/regress/sql/matview.sql +@@ -209,7 +209,7 @@ SELECT * FROM mvtest_mv_v; + DROP TABLE mvtest_v CASCADE; + + -- make sure running as superuser works when MV owned by another role (bug #11208) +-CREATE ROLE regress_user_mvtest; ++CREATE ROLE regress_user_mvtest PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET ROLE regress_user_mvtest; + -- this test case also checks for ambiguity in the queries issued by + -- refresh_by_match_merge(), by choosing column names that intentionally +@@ -264,7 +264,7 @@ ROLLBACK; + + -- INSERT privileges if relation owner is not allowed to insert. 
+ CREATE SCHEMA matview_schema; +-CREATE USER regress_matview_user; ++CREATE USER regress_matview_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + ALTER DEFAULT PRIVILEGES FOR ROLE regress_matview_user + REVOKE INSERT ON TABLES FROM regress_matview_user; + GRANT ALL ON SCHEMA matview_schema TO public; +diff --git a/src/test/regress/sql/merge.sql b/src/test/regress/sql/merge.sql +index 2a220a248f..91a404d51e 100644 +--- a/src/test/regress/sql/merge.sql ++++ b/src/test/regress/sql/merge.sql +@@ -2,9 +2,9 @@ + -- MERGE + -- + +-CREATE USER regress_merge_privs; +-CREATE USER regress_merge_no_privs; +-CREATE USER regress_merge_none; ++CREATE USER regress_merge_privs PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_merge_no_privs PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_merge_none PASSWORD NEON_PASSWORD_PLACEHOLDER; + + DROP TABLE IF EXISTS target; + DROP TABLE IF EXISTS source; +diff --git a/src/test/regress/sql/misc.sql b/src/test/regress/sql/misc.sql +index 165a2e175f..08d7096e2c 100644 +--- a/src/test/regress/sql/misc.sql ++++ b/src/test/regress/sql/misc.sql +@@ -74,22 +74,26 @@ DROP TABLE tmp; + -- copy + -- + \set filename :abs_builddir '/results/onek.data' +-COPY onek TO :'filename'; ++\set command '\\copy onek TO ' :'filename'; ++:command + + CREATE TEMP TABLE onek_copy (LIKE onek); + +-COPY onek_copy FROM :'filename'; ++\set command '\\copy onek_copy FROM ' :'filename'; ++:command + + SELECT * FROM onek EXCEPT ALL SELECT * FROM onek_copy; + + SELECT * FROM onek_copy EXCEPT ALL SELECT * FROM onek; + + \set filename :abs_builddir '/results/stud_emp.data' +-COPY BINARY stud_emp TO :'filename'; ++\set command '\\COPY BINARY stud_emp TO ' :'filename'; ++:command + + CREATE TEMP TABLE stud_emp_copy (LIKE stud_emp); + +-COPY BINARY stud_emp_copy FROM :'filename'; ++\set command '\\COPY BINARY stud_emp_copy FROM ' :'filename'; ++:command + + SELECT * FROM stud_emp_copy; + +diff --git a/src/test/regress/sql/misc_functions.sql b/src/test/regress/sql/misc_functions.sql +index b57f01f3e9..3e05aa6400 100644 +--- a/src/test/regress/sql/misc_functions.sql ++++ b/src/test/regress/sql/misc_functions.sql +@@ -82,7 +82,7 @@ SELECT pg_log_backend_memory_contexts(pg_backend_pid()); + SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity + WHERE backend_type = 'checkpointer'; + +-CREATE ROLE regress_log_memory; ++CREATE ROLE regress_log_memory PASSWORD NEON_PASSWORD_PLACEHOLDER; + + SELECT has_function_privilege('regress_log_memory', + 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- no +@@ -169,7 +169,7 @@ select count(*) > 0 from + -- + -- Test replication slot directory functions + -- +-CREATE ROLE regress_slot_dir_funcs; ++CREATE ROLE regress_slot_dir_funcs PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- Not available by default. 
+ SELECT has_function_privilege('regress_slot_dir_funcs', + 'pg_ls_logicalsnapdir()', 'EXECUTE'); +diff --git a/src/test/regress/sql/object_address.sql b/src/test/regress/sql/object_address.sql +index 1a6c61f49d..1c31ac6a53 100644 +--- a/src/test/regress/sql/object_address.sql ++++ b/src/test/regress/sql/object_address.sql +@@ -7,7 +7,7 @@ SET client_min_messages TO 'warning'; + DROP ROLE IF EXISTS regress_addr_user; + RESET client_min_messages; + +-CREATE USER regress_addr_user; ++CREATE USER regress_addr_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- Test generic object addressing/identification functions + CREATE SCHEMA addr_nsp; +diff --git a/src/test/regress/sql/password.sql b/src/test/regress/sql/password.sql +index 53e86b0b6c..f07cf1ec54 100644 +--- a/src/test/regress/sql/password.sql ++++ b/src/test/regress/sql/password.sql +@@ -10,11 +10,11 @@ SET password_encryption = 'scram-sha-256'; -- ok + + -- consistency of password entries + SET password_encryption = 'md5'; +-CREATE ROLE regress_passwd1 PASSWORD 'role_pwd1'; +-CREATE ROLE regress_passwd2 PASSWORD 'role_pwd2'; ++CREATE ROLE regress_passwd1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET password_encryption = 'scram-sha-256'; +-CREATE ROLE regress_passwd3 PASSWORD 'role_pwd3'; +-CREATE ROLE regress_passwd4 PASSWORD NULL; ++CREATE ROLE regress_passwd3 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- check list of created entries + -- +@@ -42,14 +42,14 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2; + SET password_encryption = 'md5'; + + -- encrypt with MD5 +-ALTER ROLE regress_passwd2 PASSWORD 'foo'; ++ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- already encrypted, use as they are + ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70'; + ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo='; + + SET password_encryption = 'scram-sha-256'; + -- create SCRAM secret +-ALTER ROLE regress_passwd4 PASSWORD 'foo'; ++ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER; + -- already encrypted with MD5, use as it is + CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023'; + +diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql +index 3f68cafcd1..004b26831d 100644 +--- a/src/test/regress/sql/privileges.sql ++++ b/src/test/regress/sql/privileges.sql +@@ -24,18 +24,18 @@ RESET client_min_messages; + + -- test proper begins here + +-CREATE USER regress_priv_user1; +-CREATE USER regress_priv_user2; +-CREATE USER regress_priv_user3; +-CREATE USER regress_priv_user4; +-CREATE USER regress_priv_user5; +-CREATE USER regress_priv_user5; -- duplicate +-CREATE USER regress_priv_user6; +-CREATE USER regress_priv_user7; +-CREATE USER regress_priv_user8; +-CREATE USER regress_priv_user9; +-CREATE USER regress_priv_user10; +-CREATE ROLE regress_priv_role; ++CREATE USER regress_priv_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user4 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user5 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user5 PASSWORD NEON_PASSWORD_PLACEHOLDER; -- duplicate ++CREATE USER regress_priv_user6 
PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user7 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user8 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user9 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user10 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_priv_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- circular ADMIN OPTION grants should be disallowed + GRANT regress_priv_user1 TO regress_priv_user2 WITH ADMIN OPTION; +@@ -84,11 +84,11 @@ DROP ROLE regress_priv_user5; -- should fail, dependency + DROP ROLE regress_priv_user1, regress_priv_user5; -- ok, despite order + + -- recreate the roles we just dropped +-CREATE USER regress_priv_user1; +-CREATE USER regress_priv_user2; +-CREATE USER regress_priv_user3; +-CREATE USER regress_priv_user4; +-CREATE USER regress_priv_user5; ++CREATE USER regress_priv_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user4 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_priv_user5 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + GRANT pg_read_all_data TO regress_priv_user6; + GRANT pg_write_all_data TO regress_priv_user7; +@@ -130,8 +130,8 @@ DROP USER regress_priv_user10; + DROP USER regress_priv_user9; + DROP USER regress_priv_user8; + +-CREATE GROUP regress_priv_group1; +-CREATE GROUP regress_priv_group2 WITH ADMIN regress_priv_user1 USER regress_priv_user2; ++CREATE GROUP regress_priv_group1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE GROUP regress_priv_group2 WITH ADMIN regress_priv_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER USER regress_priv_user2; + + ALTER GROUP regress_priv_group1 ADD USER regress_priv_user4; + +@@ -1124,7 +1124,7 @@ SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OP + + -- security-restricted operations + \c - +-CREATE ROLE regress_sro_user; ++CREATE ROLE regress_sro_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- Check that index expressions and predicates are run as the table's owner + +@@ -1620,8 +1620,8 @@ DROP SCHEMA testns CASCADE; + -- Change owner of the schema & and rename of new schema owner + \c - + +-CREATE ROLE regress_schemauser1 superuser login; +-CREATE ROLE regress_schemauser2 superuser login; ++CREATE ROLE regress_schemauser1 superuser login PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_schemauser2 superuser login PASSWORD NEON_PASSWORD_PLACEHOLDER; + + SET SESSION ROLE regress_schemauser1; + CREATE SCHEMA testns; +@@ -1715,7 +1715,7 @@ DROP USER regress_priv_user8; -- does not exist + + + -- permissions with LOCK TABLE +-CREATE USER regress_locktable_user; ++CREATE USER regress_locktable_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE TABLE lock_table (a int); + + -- LOCK TABLE and SELECT permission +@@ -1803,7 +1803,7 @@ DROP USER regress_locktable_user; + -- switch to superuser + \c - + +-CREATE ROLE regress_readallstats; ++CREATE ROLE regress_readallstats PASSWORD NEON_PASSWORD_PLACEHOLDER; + + SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no + SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- no +@@ -1823,10 +1823,10 @@ RESET ROLE; + DROP ROLE regress_readallstats; + + -- test role grantor machinery +-CREATE ROLE regress_group; +-CREATE ROLE regress_group_direct_manager; +-CREATE ROLE regress_group_indirect_manager; +-CREATE ROLE 
regress_group_member; ++CREATE ROLE regress_group PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_group_direct_manager PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_group_indirect_manager PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_group_member PASSWORD NEON_PASSWORD_PLACEHOLDER; + + GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE; + GRANT regress_group_direct_manager TO regress_group_indirect_manager; +@@ -1848,9 +1848,9 @@ DROP ROLE regress_group_indirect_manager; + DROP ROLE regress_group_member; + + -- test SET and INHERIT options with object ownership changes +-CREATE ROLE regress_roleoption_protagonist; +-CREATE ROLE regress_roleoption_donor; +-CREATE ROLE regress_roleoption_recipient; ++CREATE ROLE regress_roleoption_protagonist PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_roleoption_donor PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_roleoption_recipient PASSWORD NEON_PASSWORD_PLACEHOLDER; + CREATE SCHEMA regress_roleoption; + GRANT CREATE, USAGE ON SCHEMA regress_roleoption TO PUBLIC; + GRANT regress_roleoption_donor TO regress_roleoption_protagonist WITH INHERIT TRUE, SET FALSE; +diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql +index f3bc6cd07e..f1a2f58069 100644 +--- a/src/test/regress/sql/psql.sql ++++ b/src/test/regress/sql/psql.sql +@@ -496,7 +496,7 @@ select 1 where false; + \pset expanded off + + CREATE SCHEMA tableam_display; +-CREATE ROLE regress_display_role; ++CREATE ROLE regress_display_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + ALTER SCHEMA tableam_display OWNER TO regress_display_role; + SET search_path TO tableam_display; + CREATE ACCESS METHOD heap_psql TYPE TABLE HANDLER heap_tableam_handler; +@@ -1174,7 +1174,7 @@ select 1/(15-unique2) from tenk1 order by unique2 limit 19; + \unset FETCH_COUNT + + create schema testpart; +-create role regress_partitioning_role; ++create role regress_partitioning_role PASSWORD NEON_PASSWORD_PLACEHOLDER; + + alter schema testpart owner to regress_partitioning_role; + +@@ -1285,7 +1285,7 @@ reset work_mem; + + -- check \df+ + -- we have to use functions with a predictable owner name, so make a role +-create role regress_psql_user superuser; ++create role regress_psql_user superuser PASSWORD NEON_PASSWORD_PLACEHOLDER; + begin; + set session authorization regress_psql_user; + +@@ -1431,11 +1431,14 @@ CREATE TEMPORARY TABLE reload_output( + ); + + SELECT 1 AS a \g :g_out_file +-COPY reload_output(line) FROM :'g_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + SELECT 2 AS b\; SELECT 3 AS c\; SELECT 4 AS d \g :g_out_file +-COPY reload_output(line) FROM :'g_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + COPY (SELECT 'foo') TO STDOUT \; COPY (SELECT 'bar') TO STDOUT \g :g_out_file +-COPY reload_output(line) FROM :'g_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + + SELECT line FROM reload_output ORDER BY lineno; + TRUNCATE TABLE reload_output; +@@ -1452,17 +1455,20 @@ SELECT 1 AS a\; SELECT 2 AS b\; SELECT 3 AS c; + -- COPY TO file + -- The data goes to :g_out_file and the status to :o_out_file + \set QUIET false +-COPY (SELECT unique1 FROM onek ORDER BY unique1 LIMIT 10) TO :'g_out_file'; ++\set command '\\COPY (SELECT unique1 FROM onek ORDER BY unique1 LIMIT 10) TO ' :'g_out_file'; ++:command + -- DML command status + UPDATE onek SET unique1 = unique1 WHERE false; + \set QUIET true + \o + + -- 
Check the contents of the files generated. +-COPY reload_output(line) FROM :'g_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + SELECT line FROM reload_output ORDER BY lineno; + TRUNCATE TABLE reload_output; +-COPY reload_output(line) FROM :'o_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'o_out_file'; ++:command + SELECT line FROM reload_output ORDER BY lineno; + TRUNCATE TABLE reload_output; + +@@ -1475,10 +1481,12 @@ COPY (SELECT 'foo2') TO STDOUT \; COPY (SELECT 'bar2') TO STDOUT \g :g_out_file + \o + + -- Check the contents of the files generated. +-COPY reload_output(line) FROM :'g_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'g_out_file'; ++:command + SELECT line FROM reload_output ORDER BY lineno; + TRUNCATE TABLE reload_output; +-COPY reload_output(line) FROM :'o_out_file'; ++\set command '\\COPY reload_output(line) FROM ' :'o_out_file'; ++:command + SELECT line FROM reload_output ORDER BY lineno; + + DROP TABLE reload_output; +@@ -1825,10 +1833,10 @@ DROP FUNCTION psql_error; + \dX "no.such.database"."no.such.schema"."no.such.extended.statistics" + + -- check \drg and \du +-CREATE ROLE regress_du_role0; +-CREATE ROLE regress_du_role1; +-CREATE ROLE regress_du_role2; +-CREATE ROLE regress_du_admin; ++CREATE ROLE regress_du_role0 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_du_role1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_du_role2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_du_admin PASSWORD NEON_PASSWORD_PLACEHOLDER; + + GRANT regress_du_role0 TO regress_du_admin WITH ADMIN TRUE; + GRANT regress_du_role1 TO regress_du_admin WITH ADMIN TRUE; +diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql +index d5051a5e74..b32d729271 100644 +--- a/src/test/regress/sql/publication.sql ++++ b/src/test/regress/sql/publication.sql +@@ -1,9 +1,9 @@ + -- + -- PUBLICATION + -- +-CREATE ROLE regress_publication_user LOGIN SUPERUSER; +-CREATE ROLE regress_publication_user2; +-CREATE ROLE regress_publication_user_dummy LOGIN NOSUPERUSER; ++CREATE ROLE regress_publication_user LOGIN SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_publication_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_publication_user_dummy LOGIN NOSUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION 'regress_publication_user'; + + -- suppress warning that depends on wal_level +@@ -801,7 +801,7 @@ DROP PUBLICATION testpub2; + DROP PUBLICATION testpub3; + + SET ROLE regress_publication_user; +-CREATE ROLE regress_publication_user3; ++CREATE ROLE regress_publication_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT regress_publication_user2 TO regress_publication_user3; + SET client_min_messages = 'ERROR'; + CREATE PUBLICATION testpub4 FOR TABLES IN SCHEMA pub_test; +diff --git a/src/test/regress/sql/regproc.sql b/src/test/regress/sql/regproc.sql +index de2aa881a8..41a675fd35 100644 +--- a/src/test/regress/sql/regproc.sql ++++ b/src/test/regress/sql/regproc.sql +@@ -4,7 +4,7 @@ + + /* If objects exist, return oids */ + +-CREATE ROLE regress_regrole_test; ++CREATE ROLE regress_regrole_test PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- without schemaname + +diff --git a/src/test/regress/sql/roleattributes.sql b/src/test/regress/sql/roleattributes.sql +index c961b2d730..0859b89c4f 100644 +--- a/src/test/regress/sql/roleattributes.sql ++++ b/src/test/regress/sql/roleattributes.sql +@@ -1,83 +1,83 @@ + -- default for superuser is 
false +-CREATE ROLE regress_test_def_superuser; ++CREATE ROLE regress_test_def_superuser PASSWORD NEON_PASSWORD_PLACEHOLDER; + +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_superuser'; +-CREATE ROLE regress_test_superuser WITH SUPERUSER; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_superuser'; ++CREATE ROLE regress_test_superuser WITH SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; + ALTER ROLE regress_test_superuser WITH NOSUPERUSER; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; + ALTER ROLE regress_test_superuser WITH SUPERUSER; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; + + -- default for inherit is true +-CREATE ROLE regress_test_def_inherit; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_inherit'; +-CREATE ROLE regress_test_inherit WITH NOINHERIT; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; ++CREATE ROLE regress_test_def_inherit PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_inherit'; ++CREATE ROLE regress_test_inherit WITH NOINHERIT PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, 
rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; + ALTER ROLE regress_test_inherit WITH INHERIT; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; + ALTER ROLE regress_test_inherit WITH NOINHERIT; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; + + -- default for create role is false +-CREATE ROLE regress_test_def_createrole; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createrole'; +-CREATE ROLE regress_test_createrole WITH CREATEROLE; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; ++CREATE ROLE regress_test_def_createrole PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createrole'; ++CREATE ROLE regress_test_createrole WITH CREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; + ALTER ROLE regress_test_createrole WITH NOCREATEROLE; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; + ALTER ROLE regress_test_createrole WITH CREATEROLE; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, 
rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; + + -- default for create database is false +-CREATE ROLE regress_test_def_createdb; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createdb'; +-CREATE ROLE regress_test_createdb WITH CREATEDB; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb'; ++CREATE ROLE regress_test_def_createdb PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createdb'; ++CREATE ROLE regress_test_createdb WITH CREATEDB PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb'; + ALTER ROLE regress_test_createdb WITH NOCREATEDB; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb'; + ALTER ROLE regress_test_createdb WITH CREATEDB; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb'; + + -- default for can login is false for role +-CREATE ROLE regress_test_def_role_canlogin; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_role_canlogin'; +-CREATE ROLE regress_test_role_canlogin WITH LOGIN; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin'; ++CREATE ROLE regress_test_def_role_canlogin 
PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_role_canlogin'; ++CREATE ROLE regress_test_role_canlogin WITH LOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin'; + ALTER ROLE regress_test_role_canlogin WITH NOLOGIN; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin'; + ALTER ROLE regress_test_role_canlogin WITH LOGIN; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin'; + + -- default for can login is true for user +-CREATE USER regress_test_def_user_canlogin; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_user_canlogin'; +-CREATE USER regress_test_user_canlogin WITH NOLOGIN; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin'; ++CREATE USER regress_test_def_user_canlogin PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_user_canlogin'; ++CREATE USER regress_test_user_canlogin WITH NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin'; + ALTER USER regress_test_user_canlogin WITH LOGIN; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM 
pg_authid WHERE rolname = 'regress_test_user_canlogin'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin'; + ALTER USER regress_test_user_canlogin WITH NOLOGIN; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin'; + + -- default for replication is false +-CREATE ROLE regress_test_def_replication; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_replication'; +-CREATE ROLE regress_test_replication WITH REPLICATION; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication'; ++CREATE ROLE regress_test_def_replication PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_replication'; ++CREATE ROLE regress_test_replication WITH REPLICATION PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication'; + ALTER ROLE regress_test_replication WITH NOREPLICATION; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication'; + ALTER ROLE regress_test_replication WITH REPLICATION; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication'; + + -- default for bypassrls 
is false +-CREATE ROLE regress_test_def_bypassrls; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_bypassrls'; +-CREATE ROLE regress_test_bypassrls WITH BYPASSRLS; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls'; ++CREATE ROLE regress_test_def_bypassrls PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_bypassrls'; ++CREATE ROLE regress_test_bypassrls WITH BYPASSRLS PASSWORD NEON_PASSWORD_PLACEHOLDER; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls'; + ALTER ROLE regress_test_bypassrls WITH NOBYPASSRLS; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls'; + ALTER ROLE regress_test_bypassrls WITH BYPASSRLS; +-SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls'; ++SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:'), rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls'; + + -- clean up roles + DROP ROLE regress_test_def_superuser; +diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql +index dec7340538..cdbc03a5cc 100644 +--- a/src/test/regress/sql/rowsecurity.sql ++++ b/src/test/regress/sql/rowsecurity.sql +@@ -20,13 +20,13 @@ DROP SCHEMA IF EXISTS regress_rls_schema CASCADE; + RESET client_min_messages; + + -- initial setup +-CREATE USER regress_rls_alice NOLOGIN; +-CREATE USER regress_rls_bob NOLOGIN; +-CREATE USER regress_rls_carol NOLOGIN; +-CREATE USER regress_rls_dave NOLOGIN; +-CREATE USER regress_rls_exempt_user BYPASSRLS NOLOGIN; +-CREATE ROLE regress_rls_group1 NOLOGIN; +-CREATE ROLE regress_rls_group2 NOLOGIN; ++CREATE USER regress_rls_alice NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_rls_bob NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_rls_carol NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_rls_dave NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_rls_exempt_user BYPASSRLS NOLOGIN 
PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_rls_group1 NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_rls_group2 NOLOGIN PASSWORD NEON_PASSWORD_PLACEHOLDER; + + GRANT regress_rls_group1 TO regress_rls_bob; + GRANT regress_rls_group2 TO regress_rls_carol; +@@ -2065,8 +2065,8 @@ SELECT count(*) = 0 FROM pg_depend + -- DROP OWNED BY testing + RESET SESSION AUTHORIZATION; + +-CREATE ROLE regress_rls_dob_role1; +-CREATE ROLE regress_rls_dob_role2; ++CREATE ROLE regress_rls_dob_role1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_rls_dob_role2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + CREATE TABLE dob_t1 (c1 int); + CREATE TABLE dob_t2 (c1 int) PARTITION BY RANGE (c1); +diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql +index 8b7e255dcd..c58d095c05 100644 +--- a/src/test/regress/sql/rules.sql ++++ b/src/test/regress/sql/rules.sql +@@ -1356,7 +1356,7 @@ DROP TABLE ruletest2; + -- Test non-SELECT rule on security invoker view. + -- Should use view owner's permissions. + -- +-CREATE USER regress_rule_user1; ++CREATE USER regress_rule_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + CREATE TABLE ruletest_t1 (x int); + CREATE TABLE ruletest_t2 (x int); +diff --git a/src/test/regress/sql/security_label.sql b/src/test/regress/sql/security_label.sql +index 98e6a5f211..68c868fef2 100644 +--- a/src/test/regress/sql/security_label.sql ++++ b/src/test/regress/sql/security_label.sql +@@ -10,8 +10,8 @@ DROP ROLE IF EXISTS regress_seclabel_user2; + + RESET client_min_messages; + +-CREATE USER regress_seclabel_user1 WITH CREATEROLE; +-CREATE USER regress_seclabel_user2; ++CREATE USER regress_seclabel_user1 WITH CREATEROLE PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_seclabel_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + CREATE TABLE seclabel_tbl1 (a int, b text); + CREATE TABLE seclabel_tbl2 (x int, y text); +diff --git a/src/test/regress/sql/select_into.sql b/src/test/regress/sql/select_into.sql +index 689c448cc2..223ceb1d75 100644 +--- a/src/test/regress/sql/select_into.sql ++++ b/src/test/regress/sql/select_into.sql +@@ -20,7 +20,7 @@ DROP TABLE sitmp1; + -- SELECT INTO and INSERT permission, if owner is not allowed to insert. 
+ -- + CREATE SCHEMA selinto_schema; +-CREATE USER regress_selinto_user; ++CREATE USER regress_selinto_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user + REVOKE INSERT ON TABLES FROM regress_selinto_user; + GRANT ALL ON SCHEMA selinto_schema TO public; +diff --git a/src/test/regress/sql/select_views.sql b/src/test/regress/sql/select_views.sql +index e742f13699..7bd0255df8 100644 +--- a/src/test/regress/sql/select_views.sql ++++ b/src/test/regress/sql/select_views.sql +@@ -12,7 +12,7 @@ SELECT * FROM toyemp WHERE name = 'sharon'; + -- + -- Test for Leaky view scenario + -- +-CREATE ROLE regress_alice; ++CREATE ROLE regress_alice PASSWORD NEON_PASSWORD_PLACEHOLDER; + + CREATE FUNCTION f_leak (text) + RETURNS bool LANGUAGE 'plpgsql' COST 0.0000001 +diff --git a/src/test/regress/sql/sequence.sql b/src/test/regress/sql/sequence.sql +index 793f1415f6..ec07c1f193 100644 +--- a/src/test/regress/sql/sequence.sql ++++ b/src/test/regress/sql/sequence.sql +@@ -293,7 +293,7 @@ ROLLBACK; + + -- privileges tests + +-CREATE USER regress_seq_user; ++CREATE USER regress_seq_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + + -- nextval + BEGIN; +diff --git a/src/test/regress/sql/stats.sql b/src/test/regress/sql/stats.sql +index 1e21e55c6d..2251f50c5e 100644 +--- a/src/test/regress/sql/stats.sql ++++ b/src/test/regress/sql/stats.sql +@@ -622,23 +622,6 @@ SELECT :io_sum_shared_after_writes > :io_sum_shared_before_writes; + SELECT current_setting('fsync') = 'off' + OR :io_sum_shared_after_fsyncs > :io_sum_shared_before_fsyncs; + +--- Change the tablespace so that the table is rewritten directly, then SELECT +--- from it to cause it to be read back into shared buffers. +-SELECT sum(reads) AS io_sum_shared_before_reads +- FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset +--- Do this in a transaction to prevent spurious failures due to concurrent accesses to our newly +--- rewritten table, e.g. by autovacuum. +-BEGIN; +-ALTER TABLE test_io_shared SET TABLESPACE regress_tblspace; +--- SELECT from the table so that the data is read into shared buffers and +--- context 'normal', object 'relation' reads are counted. +-SELECT COUNT(*) FROM test_io_shared; +-COMMIT; +-SELECT pg_stat_force_next_flush(); +-SELECT sum(reads) AS io_sum_shared_after_reads +- FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset +-SELECT :io_sum_shared_after_reads > :io_sum_shared_before_reads; +- + SELECT sum(hits) AS io_sum_shared_before_hits + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset + -- Select from the table again to count hits. 
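For reference, the regexp_replace() expression threaded through the regress hunks above redacts the salt, stored key, and server key of a SCRAM-SHA-256 verifier while keeping the algorithm name and iteration count, so the expected output no longer depends on the randomly generated password. A minimal illustration of the rewrite, using a fabricated verifier rather than a real hash:

SELECT regexp_replace(
    'SCRAM-SHA-256$4096:c2FsdA==$c3RvcmVkS2V5:c2VydmVyS2V5',
    '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)',
    '\1$\2:$:');
-- result: SCRAM-SHA-256$4096:$: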
+diff --git a/src/test/regress/sql/stats_ext.sql b/src/test/regress/sql/stats_ext.sql +index 1b80d3687b..4d8798b0b1 100644 +--- a/src/test/regress/sql/stats_ext.sql ++++ b/src/test/regress/sql/stats_ext.sql +@@ -50,7 +50,7 @@ DROP TABLE ext_stats_test; + CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER); + CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; + COMMENT ON STATISTICS ab1_a_b_stats IS 'new comment'; +-CREATE ROLE regress_stats_ext; ++CREATE ROLE regress_stats_ext PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION regress_stats_ext; + COMMENT ON STATISTICS ab1_a_b_stats IS 'changed comment'; + DROP STATISTICS ab1_a_b_stats; +@@ -1607,7 +1607,7 @@ drop statistics stts_t1_expr_expr_stat; + set search_path to public, stts_s1; + \dX + +-create role regress_stats_ext nosuperuser; ++create role regress_stats_ext nosuperuser PASSWORD NEON_PASSWORD_PLACEHOLDER; + set role regress_stats_ext; + \dX + reset role; +@@ -1618,7 +1618,7 @@ drop user regress_stats_ext; + reset search_path; + + -- User with no access +-CREATE USER regress_stats_user1; ++CREATE USER regress_stats_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT USAGE ON SCHEMA tststats TO regress_stats_user1; + SET SESSION AUTHORIZATION regress_stats_user1; + SELECT * FROM tststats.priv_test_tbl; -- Permission denied +diff --git a/src/test/regress/sql/subscription.sql b/src/test/regress/sql/subscription.sql +index 444e563ff3..1a538a98a0 100644 +--- a/src/test/regress/sql/subscription.sql ++++ b/src/test/regress/sql/subscription.sql +@@ -2,10 +2,10 @@ + -- SUBSCRIPTION + -- + +-CREATE ROLE regress_subscription_user LOGIN SUPERUSER; +-CREATE ROLE regress_subscription_user2; +-CREATE ROLE regress_subscription_user3 IN ROLE pg_create_subscription; +-CREATE ROLE regress_subscription_user_dummy LOGIN NOSUPERUSER; ++CREATE ROLE regress_subscription_user LOGIN SUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_subscription_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE ROLE regress_subscription_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER IN ROLE pg_create_subscription; ++CREATE ROLE regress_subscription_user_dummy LOGIN NOSUPERUSER PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET SESSION AUTHORIZATION 'regress_subscription_user'; + + -- fail - no publications +diff --git a/src/test/regress/sql/test_setup.sql b/src/test/regress/sql/test_setup.sql +index 1b2d434683..b765c748b8 100644 +--- a/src/test/regress/sql/test_setup.sql ++++ b/src/test/regress/sql/test_setup.sql +@@ -135,7 +135,8 @@ CREATE TABLE onek ( + ); + + \set filename :abs_srcdir '/data/onek.data' +-COPY onek FROM :'filename'; ++\set command '\\copy onek FROM ' :'filename'; ++:command + VACUUM ANALYZE onek; + + CREATE TABLE onek2 AS SELECT * FROM onek; +@@ -161,7 +162,8 @@ CREATE TABLE tenk1 ( + ); + + \set filename :abs_srcdir '/data/tenk.data' +-COPY tenk1 FROM :'filename'; ++\set command '\\copy tenk1 FROM ' :'filename'; ++:command + VACUUM ANALYZE tenk1; + + CREATE TABLE tenk2 AS SELECT * FROM tenk1; +@@ -174,7 +176,8 @@ CREATE TABLE person ( + ); + + \set filename :abs_srcdir '/data/person.data' +-COPY person FROM :'filename'; ++\set command '\\copy person FROM ' :'filename'; ++:command + VACUUM ANALYZE person; + + CREATE TABLE emp ( +@@ -183,7 +186,8 @@ CREATE TABLE emp ( + ) INHERITS (person); + + \set filename :abs_srcdir '/data/emp.data' +-COPY emp FROM :'filename'; ++\set command '\\copy emp FROM ' :'filename'; ++:command + VACUUM ANALYZE emp; + + CREATE TABLE student ( +@@ -191,7 +195,8 @@ CREATE TABLE student ( + ) INHERITS 
(person); + + \set filename :abs_srcdir '/data/student.data' +-COPY student FROM :'filename'; ++\set command '\\copy student FROM ' :'filename'; ++:command + VACUUM ANALYZE student; + + CREATE TABLE stud_emp ( +@@ -199,7 +204,8 @@ CREATE TABLE stud_emp ( + ) INHERITS (emp, student); + + \set filename :abs_srcdir '/data/stud_emp.data' +-COPY stud_emp FROM :'filename'; ++\set command '\\copy stud_emp FROM ' :'filename'; ++:command + VACUUM ANALYZE stud_emp; + + CREATE TABLE road ( +@@ -208,7 +214,8 @@ CREATE TABLE road ( + ); + + \set filename :abs_srcdir '/data/streets.data' +-COPY road FROM :'filename'; ++\set command '\\copy road FROM ' :'filename'; ++:command + VACUUM ANALYZE road; + + CREATE TABLE ihighway () INHERITS (road); +diff --git a/src/test/regress/sql/tsearch.sql b/src/test/regress/sql/tsearch.sql +index fbd26cdba4..7ec2d78eee 100644 +--- a/src/test/regress/sql/tsearch.sql ++++ b/src/test/regress/sql/tsearch.sql +@@ -49,7 +49,8 @@ CREATE TABLE test_tsvector( + ); + + \set filename :abs_srcdir '/data/tsearch.data' +-COPY test_tsvector FROM :'filename'; ++\set command '\\copy test_tsvector FROM ' :'filename'; ++:command + + ANALYZE test_tsvector; + +diff --git a/src/test/regress/sql/updatable_views.sql b/src/test/regress/sql/updatable_views.sql +index 0a3176e25d..7744ef68f5 100644 +--- a/src/test/regress/sql/updatable_views.sql ++++ b/src/test/regress/sql/updatable_views.sql +@@ -425,9 +425,9 @@ DROP TABLE base_tbl CASCADE; + + -- permissions checks + +-CREATE USER regress_view_user1; +-CREATE USER regress_view_user2; +-CREATE USER regress_view_user3; ++CREATE USER regress_view_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_view_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++CREATE USER regress_view_user3 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + SET SESSION AUTHORIZATION regress_view_user1; + CREATE TABLE base_tbl(a int, b text, c float); +@@ -1586,8 +1586,8 @@ drop view uv_iocu_view; + drop table uv_iocu_tab; + + -- ON CONFLICT DO UPDATE permissions checks +-create user regress_view_user1; +-create user regress_view_user2; ++create user regress_view_user1 PASSWORD NEON_PASSWORD_PLACEHOLDER; ++create user regress_view_user2 PASSWORD NEON_PASSWORD_PLACEHOLDER; + + set session authorization regress_view_user1; + create table base_tbl(a int unique, b text, c float); +diff --git a/src/test/regress/sql/update.sql b/src/test/regress/sql/update.sql +index 7a7bee77b9..07b480cd59 100644 +--- a/src/test/regress/sql/update.sql ++++ b/src/test/regress/sql/update.sql +@@ -339,7 +339,7 @@ DROP FUNCTION func_parted_mod_b(); + ----------------------------------------- + + ALTER TABLE range_parted ENABLE ROW LEVEL SECURITY; +-CREATE USER regress_range_parted_user; ++CREATE USER regress_range_parted_user PASSWORD NEON_PASSWORD_PLACEHOLDER; + GRANT ALL ON range_parted, mintab TO regress_range_parted_user; + CREATE POLICY seeall ON range_parted AS PERMISSIVE FOR SELECT USING (true); + CREATE POLICY policy_range_parted ON range_parted for UPDATE USING (true) WITH CHECK (c % 2 = 0); +diff --git a/src/test/regress/sql/vacuum.sql b/src/test/regress/sql/vacuum.sql +index ae36b54641..5612b8e162 100644 +--- a/src/test/regress/sql/vacuum.sql ++++ b/src/test/regress/sql/vacuum.sql +@@ -335,7 +335,7 @@ CREATE TABLE vacowned (a int); + CREATE TABLE vacowned_parted (a int) PARTITION BY LIST (a); + CREATE TABLE vacowned_part1 PARTITION OF vacowned_parted FOR VALUES IN (1); + CREATE TABLE vacowned_part2 PARTITION OF vacowned_parted FOR VALUES IN (2); +-CREATE ROLE regress_vacuum; ++CREATE ROLE 
regress_vacuum PASSWORD NEON_PASSWORD_PLACEHOLDER; + SET ROLE regress_vacuum; + -- Simple table + VACUUM vacowned; diff --git a/pgxn/neon/Makefile b/pgxn/neon/Makefile index 3b755bb042..ddc8155ff3 100644 --- a/pgxn/neon/Makefile +++ b/pgxn/neon/Makefile @@ -9,6 +9,8 @@ OBJS = \ hll.o \ libpagestore.o \ neon.o \ + neon_pgversioncompat.o \ + neon_perf_counters.o \ neon_utils.o \ neon_walreader.o \ pagestore_smgr.o \ @@ -23,7 +25,7 @@ SHLIB_LINK_INTERNAL = $(libpq) SHLIB_LINK = -lcurl EXTENSION = neon -DATA = neon--1.0.sql neon--1.0--1.1.sql neon--1.1--1.2.sql neon--1.2--1.3.sql neon--1.3--1.2.sql neon--1.2--1.1.sql neon--1.1--1.0.sql neon--1.3--1.4.sql neon--1.4--1.3.sql +DATA = neon--1.0.sql neon--1.0--1.1.sql neon--1.1--1.2.sql neon--1.2--1.3.sql neon--1.3--1.2.sql neon--1.2--1.1.sql neon--1.1--1.0.sql neon--1.3--1.4.sql neon--1.4--1.3.sql neon--1.4--1.5.sql neon--1.5--1.4.sql PGFILEDESC = "neon - cloud storage for PostgreSQL" EXTRA_CLEAN = \ diff --git a/pgxn/neon/file_cache.c b/pgxn/neon/file_cache.c index ab6739465b..2b461c8641 100644 --- a/pgxn/neon/file_cache.c +++ b/pgxn/neon/file_cache.c @@ -109,6 +109,7 @@ typedef struct FileCacheControl * reenabling */ uint32 size; /* size of cache file in chunks */ uint32 used; /* number of used chunks */ + uint32 used_pages; /* number of used pages */ uint32 limit; /* shared copy of lfc_size_limit */ uint64 hits; uint64 misses; @@ -905,6 +906,10 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, /* Cache overflow: evict least recently used chunk */ FileCacheEntry *victim = dlist_container(FileCacheEntry, list_node, dlist_pop_head_node(&lfc_ctl->lru)); + for (int i = 0; i < BLOCKS_PER_CHUNK; i++) + { + lfc_ctl->used_pages -= (victim->bitmap[i >> 5] >> (i & 31)) & 1; + } CriticalAssert(victim->access_count == 0); entry->offset = victim->offset; /* grab victim's chunk */ hash_search_with_hash_value(lfc_hash, &victim->key, victim->hash, HASH_REMOVE, NULL); @@ -959,6 +964,7 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, for (int i = 0; i < blocks_in_chunk; i++) { + lfc_ctl->used_pages += 1 - ((entry->bitmap[(chunk_offs + i) >> 5] >> ((chunk_offs + i) & 31)) & 1); entry->bitmap[(chunk_offs + i) >> 5] |= (1 << ((chunk_offs + i) & 31)); } @@ -1051,6 +1057,11 @@ neon_get_lfc_stats(PG_FUNCTION_ARGS) if (lfc_ctl) value = lfc_ctl->size; break; + case 5: + key = "file_cache_used_pages"; + if (lfc_ctl) + value = lfc_ctl->used_pages; + break; default: SRF_RETURN_DONE(funcctx); } diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index df7000acc0..07a19a7114 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -30,6 +30,7 @@ #include "utils/guc.h" #include "neon.h" +#include "neon_perf_counters.h" #include "neon_utils.h" #include "pagestore_client.h" #include "walproposer.h" @@ -331,6 +332,7 @@ CLEANUP_AND_DISCONNECT(PageServer *shard) } if (shard->conn) { + MyNeonCounters->pageserver_disconnects_total++; PQfinish(shard->conn); shard->conn = NULL; } @@ -737,6 +739,8 @@ pageserver_send(shardno_t shard_no, NeonRequest *request) PageServer *shard = &page_servers[shard_no]; PGconn *pageserver_conn; + MyNeonCounters->pageserver_requests_sent_total++; + /* If the connection was lost for some reason, reconnect */ if (shard->state == PS_Connected && PQstatus(shard->conn) == CONNECTION_BAD) { @@ -889,6 +893,7 @@ pageserver_flush(shardno_t shard_no) } else { + MyNeonCounters->pageserver_send_flushes_total++; if (PQflush(pageserver_conn)) { char *msg = pchomp(PQerrorMessage(pageserver_conn)); 
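The used_pages accounting added to file_cache.c above is surfaced through the existing neon_get_lfc_stats set-returning function as one more key/value row. A usage sketch, assuming the SQL-level wrapper keeps the (lfc_key text, lfc_value bigint) row shape used for the other keys; the exact wrapper signature is an assumption here:

-- Assumed column definition list; adjust to the extension's actual SQL wrapper.
SELECT lfc_value AS used_pages
  FROM neon_get_lfc_stats() AS s(lfc_key text, lfc_value bigint)
 WHERE lfc_key = 'file_cache_used_pages';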
@@ -922,7 +927,7 @@ check_neon_id(char **newval, void **extra, GucSource source) static Size PagestoreShmemSize(void) { - return sizeof(PagestoreShmemState); + return add_size(sizeof(PagestoreShmemState), NeonPerfCountersShmemSize()); } static bool @@ -941,6 +946,9 @@ PagestoreShmemInit(void) memset(&pagestore_shared->shard_map, 0, sizeof(ShardMap)); AssignPageserverConnstring(page_server_connstring, NULL); } + + NeonPerfCountersShmemInit(); + LWLockRelease(AddinShmemInitLock); return found; } diff --git a/pgxn/neon/neon--1.4--1.5.sql b/pgxn/neon/neon--1.4--1.5.sql new file mode 100644 index 0000000000..a1db7bf1b1 --- /dev/null +++ b/pgxn/neon/neon--1.4--1.5.sql @@ -0,0 +1,39 @@ +\echo Use "ALTER EXTENSION neon UPDATE TO '1.5'" to load this file. \quit + + +CREATE FUNCTION get_backend_perf_counters() +RETURNS SETOF RECORD +AS 'MODULE_PATHNAME', 'neon_get_backend_perf_counters' +LANGUAGE C PARALLEL SAFE; + +CREATE FUNCTION get_perf_counters() +RETURNS SETOF RECORD +AS 'MODULE_PATHNAME', 'neon_get_perf_counters' +LANGUAGE C PARALLEL SAFE; + +-- Show various metrics for each backend. Note that the values are not reset +-- when a backend exits. When a new backend starts with the same backend ID, it will +-- continue accumulating the values from where the old backend left off. If you are +-- only interested in the changes from your own session, store the values at the +-- beginning of the session somewhere, and subtract them on subsequent calls. +-- +-- For histograms, 'bucket_le' is the upper bound of the histogram bucket. +CREATE VIEW neon_backend_perf_counters AS + SELECT P.procno, P.pid, P.metric, P.bucket_le, P.value + FROM get_backend_perf_counters() AS P ( + procno integer, + pid integer, + metric text, + bucket_le float8, + value float8 + ); + +-- Summary across all backends. (This could also be implemented with +-- an aggregate query over neon_backend_perf_counters view.) +CREATE VIEW neon_perf_counters AS + SELECT P.metric, P.bucket_le, P.value + FROM get_perf_counters() AS P ( + metric text, + bucket_le float8, + value float8 + ); diff --git a/pgxn/neon/neon--1.5--1.4.sql b/pgxn/neon/neon--1.5--1.4.sql new file mode 100644 index 0000000000..7939fd8aa9 --- /dev/null +++ b/pgxn/neon/neon--1.5--1.4.sql @@ -0,0 +1,4 @@ +DROP VIEW IF EXISTS neon_perf_counters; +DROP VIEW IF EXISTS neon_backend_perf_counters; +DROP FUNCTION IF EXISTS get_perf_counters(); +DROP FUNCTION IF EXISTS get_backend_perf_counters(); diff --git a/pgxn/neon/neon.control b/pgxn/neon/neon.control index 03bdb9a0b4..0b36bdbb65 100644 --- a/pgxn/neon/neon.control +++ b/pgxn/neon/neon.control @@ -1,5 +1,7 @@ # neon extension comment = 'cloud storage for PostgreSQL' +# TODO: bump default version to 1.5, after we are certain that we don't +# need to roll back the compute image default_version = '1.4' module_pathname = '$libdir/neon' relocatable = true diff --git a/pgxn/neon/neon_perf_counters.c b/pgxn/neon/neon_perf_counters.c new file mode 100644 index 0000000000..3e86d5b262 --- /dev/null +++ b/pgxn/neon/neon_perf_counters.c @@ -0,0 +1,261 @@ +/*------------------------------------------------------------------------- + * + * neon_perf_counters.c + * Collect statistics about Neon I/O + * + * Each backend has its own set of counters in shared memory.
+ * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include <math.h> + +#include "funcapi.h" +#include "miscadmin.h" +#include "storage/proc.h" +#include "storage/shmem.h" +#include "utils/builtins.h" + +#include "neon_perf_counters.h" +#include "neon_pgversioncompat.h" + +neon_per_backend_counters *neon_per_backend_counters_shared; + +Size +NeonPerfCountersShmemSize(void) +{ + Size size = 0; + + size = add_size(size, mul_size(MaxBackends, sizeof(neon_per_backend_counters))); + + return size; +} + +bool +NeonPerfCountersShmemInit(void) +{ + bool found; + + neon_per_backend_counters_shared = + ShmemInitStruct("Neon perf counters", + mul_size(MaxBackends, + sizeof(neon_per_backend_counters)), + &found); + Assert(found == IsUnderPostmaster); + if (!found) + { + /* shared memory is initialized to zeros, so nothing to do here */ + } + + return found; +} + +/* + * Count a GetPage wait operation. + */ +void +inc_getpage_wait(uint64 latency_us) +{ + int lo = 0; + int hi = NUM_GETPAGE_WAIT_BUCKETS - 1; + + /* Find the right bucket with binary search */ + while (lo < hi) + { + int mid = (lo + hi) / 2; + + if (latency_us < getpage_wait_bucket_thresholds[mid]) + hi = mid; + else + lo = mid + 1; + } + MyNeonCounters->getpage_wait_us_bucket[lo]++; + MyNeonCounters->getpage_wait_us_sum += latency_us; + MyNeonCounters->getpage_wait_us_count++; +} + +/* + * Support functions for the views, neon_backend_perf_counters and + * neon_perf_counters. + */ + +typedef struct +{ + char *name; + bool is_bucket; + double bucket_le; + double value; +} metric_t; + +static metric_t * +neon_perf_counters_to_metrics(neon_per_backend_counters *counters) +{ +#define NUM_METRICS (2 + NUM_GETPAGE_WAIT_BUCKETS + 8) + metric_t *metrics = palloc((NUM_METRICS + 1) * sizeof(metric_t)); + uint64 bucket_accum; + int i = 0; + Datum getpage_wait_str; + + metrics[i].name = "getpage_wait_seconds_count"; + metrics[i].is_bucket = false; + metrics[i].value = (double) counters->getpage_wait_us_count; + i++; + metrics[i].name = "getpage_wait_seconds_sum"; + metrics[i].is_bucket = false; + metrics[i].value = ((double) counters->getpage_wait_us_sum) / 1000000.0; + i++; + + bucket_accum = 0; + for (int bucketno = 0; bucketno < NUM_GETPAGE_WAIT_BUCKETS; bucketno++) + { + uint64 threshold = getpage_wait_bucket_thresholds[bucketno]; + + bucket_accum += counters->getpage_wait_us_bucket[bucketno]; + + metrics[i].name = "getpage_wait_seconds_bucket"; + metrics[i].is_bucket = true; + metrics[i].bucket_le = (threshold == UINT64_MAX) ?
INFINITY : ((double) threshold) / 1000000.0; + metrics[i].value = (double) bucket_accum; + i++; + } + metrics[i].name = "getpage_prefetch_requests_total"; + metrics[i].is_bucket = false; + metrics[i].value = (double) counters->getpage_prefetch_requests_total; + i++; + metrics[i].name = "getpage_sync_requests_total"; + metrics[i].is_bucket = false; + metrics[i].value = (double) counters->getpage_sync_requests_total; + i++; + metrics[i].name = "getpage_prefetch_misses_total"; + metrics[i].is_bucket = false; + metrics[i].value = (double) counters->getpage_prefetch_misses_total; + i++; + metrics[i].name = "getpage_prefetch_discards_total"; + metrics[i].is_bucket = false; + metrics[i].value = (double) counters->getpage_prefetch_discards_total; + i++; + metrics[i].name = "pageserver_requests_sent_total"; + metrics[i].is_bucket = false; + metrics[i].value = (double) counters->pageserver_requests_sent_total; + i++; + metrics[i].name = "pageserver_requests_disconnects_total"; + metrics[i].is_bucket = false; + metrics[i].value = (double) counters->pageserver_disconnects_total; + i++; + metrics[i].name = "pageserver_send_flushes_total"; + metrics[i].is_bucket = false; + metrics[i].value = (double) counters->pageserver_send_flushes_total; + i++; + metrics[i].name = "file_cache_hits_total"; + metrics[i].is_bucket = false; + metrics[i].value = (double) counters->file_cache_hits_total; + i++; + + Assert(i == NUM_METRICS); + + /* NULL entry marks end of array */ + metrics[i].name = NULL; + metrics[i].value = 0; + + return metrics; +} + +/* + * Write metric to three output Datums + */ +static void +metric_to_datums(metric_t *m, Datum *values, bool *nulls) +{ + values[0] = CStringGetTextDatum(m->name); + nulls[0] = false; + if (m->is_bucket) + { + values[1] = Float8GetDatum(m->bucket_le); + nulls[1] = false; + } + else + { + values[1] = (Datum) 0; + nulls[1] = true; + } + values[2] = Float8GetDatum(m->value); + nulls[2] = false; +} + +PG_FUNCTION_INFO_V1(neon_get_backend_perf_counters); +Datum +neon_get_backend_perf_counters(PG_FUNCTION_ARGS) +{ + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + Datum values[5]; + bool nulls[5]; + + /* We put all the tuples into a tuplestore in one go. */ + InitMaterializedSRF(fcinfo, 0); + + for (int procno = 0; procno < MaxBackends; procno++) + { + PGPROC *proc = GetPGProcByNumber(procno); + int pid = proc->pid; + neon_per_backend_counters *counters = &neon_per_backend_counters_shared[procno]; + metric_t *metrics = neon_perf_counters_to_metrics(counters); + + values[0] = Int32GetDatum(procno); + nulls[0] = false; + values[1] = Int32GetDatum(pid); + nulls[1] = false; + + for (int i = 0; metrics[i].name != NULL; i++) + { + metric_to_datums(&metrics[i], &values[2], &nulls[2]); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); + } + + pfree(metrics); + } + + return (Datum) 0; +} + +PG_FUNCTION_INFO_V1(neon_get_perf_counters); +Datum +neon_get_perf_counters(PG_FUNCTION_ARGS) +{ + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + Datum values[3]; + bool nulls[3]; + Datum getpage_wait_str; + neon_per_backend_counters totals = {0}; + metric_t *metrics; + + /* We put all the tuples into a tuplestore in one go. 
*/ + InitMaterializedSRF(fcinfo, 0); + + /* Aggregate the counters across all backends */ + for (int procno = 0; procno < MaxBackends; procno++) + { + neon_per_backend_counters *counters = &neon_per_backend_counters_shared[procno]; + + totals.getpage_wait_us_count += counters->getpage_wait_us_count; + totals.getpage_wait_us_sum += counters->getpage_wait_us_sum; + for (int bucketno = 0; bucketno < NUM_GETPAGE_WAIT_BUCKETS; bucketno++) + totals.getpage_wait_us_bucket[bucketno] += counters->getpage_wait_us_bucket[bucketno]; + totals.getpage_prefetch_requests_total += counters->getpage_prefetch_requests_total; + totals.getpage_sync_requests_total += counters->getpage_sync_requests_total; + totals.getpage_prefetch_misses_total += counters->getpage_prefetch_misses_total; + totals.getpage_prefetch_discards_total += counters->getpage_prefetch_discards_total; + totals.pageserver_requests_sent_total += counters->pageserver_requests_sent_total; + totals.pageserver_disconnects_total += counters->pageserver_disconnects_total; + totals.pageserver_send_flushes_total += counters->pageserver_send_flushes_total; + totals.file_cache_hits_total += counters->file_cache_hits_total; + } + + metrics = neon_perf_counters_to_metrics(&totals); + for (int i = 0; metrics[i].name != NULL; i++) + { + metric_to_datums(&metrics[i], &values[0], &nulls[0]); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); + } + pfree(metrics); + + return (Datum) 0; +} diff --git a/pgxn/neon/neon_perf_counters.h b/pgxn/neon/neon_perf_counters.h new file mode 100644 index 0000000000..ae35e8c3a5 --- /dev/null +++ b/pgxn/neon/neon_perf_counters.h @@ -0,0 +1,111 @@ +/*------------------------------------------------------------------------- + * + * neon_perf_counters.h + * Performance counters for neon storage requests + *------------------------------------------------------------------------- + */ + +#ifndef NEON_PERF_COUNTERS_H +#define NEON_PERF_COUNTERS_H + +#if PG_VERSION_NUM >= 170000 +#include "storage/procnumber.h" +#else +#include "storage/backendid.h" +#include "storage/proc.h" +#endif + +static const uint64 getpage_wait_bucket_thresholds[] = { + 20, 30, 60, 100, /* 0 - 100 us */ + 200, 300, 600, 1000, /* 100 us - 1 ms */ + 2000, 3000, 6000, 10000, /* 1 ms - 10 ms */ + 20000, 30000, 60000, 100000, /* 10 ms - 100 ms */ + 200000, 300000, 600000, 1000000, /* 100 ms - 1 s */ + 2000000, 3000000, 6000000, 10000000, /* 1 s - 10 s */ + 20000000, 30000000, 60000000, 100000000, /* 10 s - 100 s */ + UINT64_MAX, +}; +#define NUM_GETPAGE_WAIT_BUCKETS (lengthof(getpage_wait_bucket_thresholds)) + +typedef struct +{ + /* + * Histogram for how long an smgrread() request needs to wait for response + * from pageserver. When prefetching is effective, these wait times can be + * lower than the network latency to the pageserver, even zero, if the + * page is already readily prefetched whenever we need to read a page. + * + * Note: we accumulate these in microseconds, because that's convenient in + * the backend, but the 'neon_backend_perf_counters' view will convert + * them to seconds, to make them more idiomatic as prometheus metrics. + */ + uint64 getpage_wait_us_count; + uint64 getpage_wait_us_sum; + uint64 getpage_wait_us_bucket[NUM_GETPAGE_WAIT_BUCKETS]; + + /* + * Total number of speculative prefetch Getpage requests and synchronous + * GetPage requests sent. + */ + uint64 getpage_prefetch_requests_total; + uint64 getpage_sync_requests_total; + + /* XXX: It's not clear to me when these misses happen. 
*/ + uint64 getpage_prefetch_misses_total; + + /* + * Number of prefetched responses that were discarded because the + * prefetched page was not needed or because it was concurrently fetched / + * modified by another backend. + */ + uint64 getpage_prefetch_discards_total; + + /* + * Total number of requests sent to pageserver. (prefetch_requests_total + * and sync_request_total count only GetPage requests, this counts all + * request types.) + */ + uint64 pageserver_requests_sent_total; + + /* + * Number of times the connection to the pageserver was lost and the + * backend had to reconnect. Note that this doesn't count the first + * connection in each backend, only reconnects. + */ + uint64 pageserver_disconnects_total; + + /* + * Number of network flushes to the pageserver. Synchronous requests are + * flushed immediately, but when prefetching requests are sent in batches, + * this can be smaller than pageserver_requests_sent_total. + */ + uint64 pageserver_send_flushes_total; + + /* + * Number of requests satisfied from the LFC. + * + * This is redundant with the server-wide file_cache_hits, but this gives + * per-backend granularity, and it's handy to have this in the same place + * as counters for requests that went to the pageserver. Maybe move all + * the LFC stats to this struct in the future? + */ + uint64 file_cache_hits_total; + +} neon_per_backend_counters; + +/* Pointer to the shared memory array of neon_per_backend_counters structs */ +extern neon_per_backend_counters *neon_per_backend_counters_shared; + +#if PG_VERSION_NUM >= 170000 +#define MyNeonCounters (&neon_per_backend_counters_shared[MyProcNumber]) +#else +#define MyNeonCounters (&neon_per_backend_counters_shared[MyProc->pgprocno]) +#endif + +extern void inc_getpage_wait(uint64 latency); + +extern Size NeonPerfCountersShmemSize(void); +extern bool NeonPerfCountersShmemInit(void); + + +#endif /* NEON_PERF_COUNTERS_H */ diff --git a/pgxn/neon/neon_pgversioncompat.c b/pgxn/neon/neon_pgversioncompat.c new file mode 100644 index 0000000000..a0dbddde4b --- /dev/null +++ b/pgxn/neon/neon_pgversioncompat.c @@ -0,0 +1,44 @@ +/* + * Support functions for the compatibility macros in neon_pgversioncompat.h + */ +#include "postgres.h" + +#include "funcapi.h" +#include "miscadmin.h" +#include "utils/tuplestore.h" + +#include "neon_pgversioncompat.h" + +#if PG_MAJORVERSION_NUM < 15 +void +InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags) +{ + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + Tuplestorestate *tupstore; + MemoryContext old_context, + per_query_ctx; + TupleDesc stored_tupdesc; + + /* check to see if caller supports returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + + /* + * Store the tuplestore and the tuple descriptor in ReturnSetInfo. This + * must be done in the per-query memory context.
+ */ + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + old_context = MemoryContextSwitchTo(per_query_ctx); + + if (get_call_result_type(fcinfo, NULL, &stored_tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + tupstore = tuplestore_begin_heap(false, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = stored_tupdesc; + MemoryContextSwitchTo(old_context); +} +#endif diff --git a/pgxn/neon/neon_pgversioncompat.h b/pgxn/neon/neon_pgversioncompat.h index 59b97d64fe..e4754ec7ea 100644 --- a/pgxn/neon/neon_pgversioncompat.h +++ b/pgxn/neon/neon_pgversioncompat.h @@ -6,6 +6,8 @@ #ifndef NEON_PGVERSIONCOMPAT_H #define NEON_PGVERSIONCOMPAT_H +#include "fmgr.h" + #if PG_MAJORVERSION_NUM < 17 #define NRelFileInfoBackendIsTemp(rinfo) (rinfo.backend != InvalidBackendId) #else @@ -123,4 +125,8 @@ #define AmAutoVacuumWorkerProcess() (IsAutoVacuumWorkerProcess()) #endif +#if PG_MAJORVERSION_NUM < 15 +extern void InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags); +#endif + #endif /* NEON_PGVERSIONCOMPAT_H */ diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 36538ea5e2..1c87f4405c 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -66,6 +66,7 @@ #include "storage/md.h" #include "storage/smgr.h" +#include "neon_perf_counters.h" #include "pagestore_client.h" #include "bitmap.h" @@ -289,7 +290,6 @@ static PrefetchState *MyPState; static bool compact_prefetch_buffers(void); static void consume_prefetch_responses(void); -static uint64 prefetch_register_buffer(BufferTag tag, neon_request_lsns *force_request_lsns); static bool prefetch_read(PrefetchRequest *slot); static void prefetch_do_request(PrefetchRequest *slot, neon_request_lsns *force_request_lsns); static bool prefetch_wait_for(uint64 ring_index); @@ -780,21 +780,27 @@ prefetch_do_request(PrefetchRequest *slot, neon_request_lsns *force_request_lsns } /* - * prefetch_register_buffer() - register and prefetch buffer + * prefetch_register_bufferv() - register and prefetch buffers * * Register that we may want the contents of BufferTag in the near future. + * This is used when issuing a speculative prefetch request, but also when + * performing a synchronous request and need the buffer right now. * * If force_request_lsns is not NULL, those values are sent to the * pageserver. If NULL, we utilize the lastWrittenLsn -infrastructure * to calculate the LSNs to send. * + * When performing a prefetch rather than a synchronous request, + * is_prefetch==true. Currently, it only affects how the request is accounted + * in the perf counters. + * * NOTE: this function may indirectly update MyPState->pfs_hash; which * invalidates any active pointers into the hash table. 
*/ - static uint64 prefetch_register_bufferv(BufferTag tag, neon_request_lsns *frlsns, - BlockNumber nblocks, const bits8 *mask) + BlockNumber nblocks, const bits8 *mask, + bool is_prefetch) { uint64 min_ring_index; PrefetchRequest req; @@ -815,6 +821,7 @@ Retry: PrfHashEntry *entry = NULL; uint64 ring_index; neon_request_lsns *lsns; + if (PointerIsValid(mask) && !BITMAP_ISSET(mask, i)) continue; @@ -858,6 +865,7 @@ Retry: prefetch_set_unused(ring_index); entry = NULL; slot = NULL; + MyNeonCounters->getpage_prefetch_discards_total++; } } @@ -972,6 +980,11 @@ Retry: min_ring_index = Min(min_ring_index, ring_index); + if (is_prefetch) + MyNeonCounters->getpage_prefetch_requests_total++; + else + MyNeonCounters->getpage_sync_requests_total++; + prefetch_do_request(slot, lsns); } @@ -1000,13 +1013,6 @@ Retry: } -static uint64 -prefetch_register_buffer(BufferTag tag, neon_request_lsns *force_request_lsns) -{ - return prefetch_register_bufferv(tag, force_request_lsns, 1, NULL); -} - - /* * Note: this function can get canceled and use a long jump to the next catch * context. Take care. @@ -2612,7 +2618,7 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, lfc_present[i] = ~(lfc_present[i]); ring_index = prefetch_register_bufferv(tag, NULL, iterblocks, - lfc_present); + lfc_present, true); nblocks -= iterblocks; blocknum += iterblocks; @@ -2656,7 +2662,7 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum) CopyNRelFileInfoToBufTag(tag, InfoFromSMgrRel(reln)); - ring_index = prefetch_register_buffer(tag, NULL); + ring_index = prefetch_register_bufferv(tag, NULL, 1, NULL, true); Assert(ring_index < MyPState->ring_unused && MyPState->ring_last <= ring_index); @@ -2747,17 +2753,20 @@ neon_read_at_lsnv(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber base_block * weren't for the behaviour of the LwLsn cache that uses the highest * value of the LwLsn cache when the entry is not found. */ - prefetch_register_bufferv(buftag, request_lsns, nblocks, mask); + prefetch_register_bufferv(buftag, request_lsns, nblocks, mask, false); for (int i = 0; i < nblocks; i++) { void *buffer = buffers[i]; BlockNumber blockno = base_blockno + i; neon_request_lsns *reqlsns = &request_lsns[i]; + TimestampTz start_ts, end_ts; if (PointerIsValid(mask) && !BITMAP_ISSET(mask, i)) continue; + start_ts = GetCurrentTimestamp(); + if (RecoveryInProgress() && MyBackendType != B_STARTUP) XLogWaitForReplayOf(reqlsns[0].request_lsn); @@ -2794,6 +2803,7 @@ Retry: /* drop caches */ prefetch_set_unused(slot->my_ring_index); pgBufferUsage.prefetch.expired += 1; + MyNeonCounters->getpage_prefetch_discards_total++; /* make it look like a prefetch cache miss */ entry = NULL; } @@ -2804,8 +2814,9 @@ Retry: if (entry == NULL) { pgBufferUsage.prefetch.misses += 1; + MyNeonCounters->getpage_prefetch_misses_total++; - ring_index = prefetch_register_bufferv(buftag, reqlsns, 1, NULL); + ring_index = prefetch_register_bufferv(buftag, reqlsns, 1, NULL, false); Assert(ring_index != UINT64_MAX); slot = GetPrfSlot(ring_index); } @@ -2860,6 +2871,9 @@ Retry: /* buffer was used, clean up for later reuse */ prefetch_set_unused(ring_index); prefetch_cleanup_trailing_unused(); + + end_ts = GetCurrentTimestamp(); + inc_getpage_wait(end_ts >= start_ts ? 
(end_ts - start_ts) : 0); } } @@ -2913,6 +2927,7 @@ neon_read(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, void *buffer /* Try to read from local file cache */ if (lfc_read(InfoFromSMgrRel(reln), forkNum, blkno, buffer)) { + MyNeonCounters->file_cache_hits_total++; return; } @@ -3097,7 +3112,7 @@ neon_readv(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, /* assume heap */ RmgrTable[RM_HEAP_ID].rm_mask(mdbuf_masked, blkno); RmgrTable[RM_HEAP_ID].rm_mask(pageserver_masked, blkno); - + if (memcmp(mdbuf_masked, pageserver_masked, BLCKSZ) != 0) { neon_log(PANIC, "heap buffers differ at blk %u in rel %u/%u/%u fork %u (request LSN %X/%08X):\n------ MD ------\n%s\n------ Page Server ------\n%s\n", diff --git a/pgxn/neon_walredo/walredoproc.c b/pgxn/neon_walredo/walredoproc.c index 219ca85207..f98aa1cbe7 100644 --- a/pgxn/neon_walredo/walredoproc.c +++ b/pgxn/neon_walredo/walredoproc.c @@ -24,6 +24,7 @@ * PushPage ('P'): Copy a page image (in the payload) to buffer cache * ApplyRecord ('A'): Apply a WAL record (in the payload) * GetPage ('G'): Return a page image from buffer cache. + * Ping ('H'): Return the input message. * * Currently, you only get a response to GetPage requests; the response is * simply a 8k page, without any headers. Errors are logged to stderr. @@ -133,6 +134,7 @@ static void ApplyRecord(StringInfo input_message); static void apply_error_callback(void *arg); static bool redo_block_filter(XLogReaderState *record, uint8 block_id); static void GetPage(StringInfo input_message); +static void Ping(StringInfo input_message); static ssize_t buffered_read(void *buf, size_t count); static void CreateFakeSharedMemoryAndSemaphores(); @@ -394,6 +396,10 @@ WalRedoMain(int argc, char *argv[]) GetPage(&input_message); break; + case 'H': /* Ping */ + Ping(&input_message); + break; + /* * EOF means we're done. Perform normal shutdown. 
*/ @@ -1057,6 +1063,36 @@ GetPage(StringInfo input_message) } + +static void +Ping(StringInfo input_message) +{ + int tot_written; + /* Response: the input message */ + tot_written = 0; + do { + ssize_t rc; + /* We don't need alignment, but it's bad practice to use char[BLCKSZ] */ +#if PG_VERSION_NUM >= 160000 + static const PGIOAlignedBlock response; +#else + static const PGAlignedBlock response; +#endif + rc = write(STDOUT_FILENO, &response.data[tot_written], BLCKSZ - tot_written); + if (rc < 0) { + /* If interrupted by signal, just retry */ + if (errno == EINTR) + continue; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not write to stdout: %m"))); + } + tot_written += rc; + } while (tot_written < BLCKSZ); + + elog(TRACE, "Page sent back for ping"); +} + + /* Buffer used by buffered_read() */ static char stdin_buf[16 * 1024]; static size_t stdin_len = 0; /* # of bytes in buffer */ diff --git a/proxy/src/auth/backend.rs b/proxy/src/auth/backend.rs index 5bc2f2ff65..4e9f4591ad 100644 --- a/proxy/src/auth/backend.rs +++ b/proxy/src/auth/backend.rs @@ -444,7 +444,7 @@ impl<'a> Backend<'a, ComputeUserInfoMaybeEndpoint, &()> { Self::Web(url, ()) => { info!("performing web authentication"); - let info = web::authenticate(ctx, &url, client).await?; + let info = web::authenticate(ctx, config, &url, client).await?; Backend::Web(url, info) } diff --git a/proxy/src/auth/backend/web.rs b/proxy/src/auth/backend/web.rs index 58a4bef62e..05f437355e 100644 --- a/proxy/src/auth/backend/web.rs +++ b/proxy/src/auth/backend/web.rs @@ -1,5 +1,6 @@ use crate::{ auth, compute, + config::AuthenticationConfig, console::{self, provider::NodeInfo}, context::RequestMonitoring, error::{ReportableError, UserFacingError}, @@ -58,6 +59,7 @@ pub(crate) fn new_psql_session_id() -> String { pub(super) async fn authenticate( ctx: &RequestMonitoring, + auth_config: &'static AuthenticationConfig, link_uri: &reqwest::Url, client: &mut PqStream<impl AsyncRead + AsyncWrite + Unpin>, ) -> auth::Result<NodeInfo> { @@ -89,6 +91,14 @@ pub(super) async fn authenticate( info!(parent: &span, "waiting for console's reply..."); let db_info = waiter.await.map_err(WebAuthError::from)?; + if auth_config.ip_allowlist_check_enabled { + if let Some(allowed_ips) = &db_info.allowed_ips { + if !auth::check_peer_addr_is_in_list(&ctx.peer_addr(), allowed_ips) { + return Err(auth::AuthError::ip_address_not_allowed(ctx.peer_addr())); + } + } + } + client.write_message_noflush(&Be::NoticeResponse("Connecting to database."))?; // This config should be self-contained, because we won't diff --git a/proxy/src/console/messages.rs b/proxy/src/console/messages.rs index 9b66333cd4..85683acb82 100644 --- a/proxy/src/console/messages.rs +++ b/proxy/src/console/messages.rs @@ -284,6 +284,8 @@ pub(crate) struct DatabaseInfo { /// be inconvenient for debug with local PG instance. pub(crate) password: Option<Box<str>>, pub(crate) aux: MetricsAuxInfo, + #[serde(default)] + pub(crate) allowed_ips: Option<Vec<IpPattern>>, } // Manually implement debug to omit sensitive info.
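Returning to the SQL views added by neon--1.4--1.5.sql above: as the comment there notes, the counters are cumulative and are not reset when a backend exits, so per-session deltas have to be computed by the caller. A sketch of that pattern, where only neon_perf_counters itself comes from this patch and the temp-table name is illustrative:

-- At session start: snapshot the cumulative counters.
CREATE TEMP TABLE perf_baseline AS
    SELECT metric, bucket_le, value FROM neon_perf_counters;

-- Later in the session: report only the delta since the snapshot.
SELECT cur.metric, cur.bucket_le, cur.value - base.value AS delta
  FROM neon_perf_counters AS cur
  JOIN perf_baseline AS base
    ON base.metric = cur.metric
   AND base.bucket_le IS NOT DISTINCT FROM cur.bucket_le; -- bucket_le is NULL for non-histogram rows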
@@ -294,6 +296,7 @@ impl fmt::Debug for DatabaseInfo { .field("port", &self.port) .field("dbname", &self.dbname) .field("user", &self.user) + .field("allowed_ips", &self.allowed_ips) .finish_non_exhaustive() } } @@ -432,6 +435,22 @@ mod tests { "aux": dummy_aux(), }))?; + + // with allowed_ips + let dbinfo = serde_json::from_value::<DatabaseInfo>(json!({ + "host": "localhost", + "port": 5432, + "dbname": "postgres", + "user": "john_doe", + "password": "password", + "aux": dummy_aux(), + "allowed_ips": ["127.0.0.1"], + }))?; + + assert_eq!( + dbinfo.allowed_ips, + Some(vec![IpPattern::Single("127.0.0.1".parse()?)]) + ); + Ok(()) } diff --git a/storage_controller/src/compute_hook.rs b/storage_controller/src/compute_hook.rs index c46539485c..bafae1f551 100644 --- a/storage_controller/src/compute_hook.rs +++ b/storage_controller/src/compute_hook.rs @@ -71,6 +71,37 @@ impl ComputeHookTenant { } } + fn is_sharded(&self) -> bool { + matches!(self, ComputeHookTenant::Sharded(_)) + } + + /// Clear compute hook state for the specified shard. + /// Only valid for [`ComputeHookTenant::Sharded`] instances. + fn remove_shard(&mut self, tenant_shard_id: TenantShardId, stripe_size: ShardStripeSize) { + match self { + ComputeHookTenant::Sharded(sharded) => { + if sharded.stripe_size != stripe_size + || sharded.shard_count != tenant_shard_id.shard_count + { + tracing::warn!("Shard split detected while handling detach") + } + + let shard_idx = sharded.shards.iter().position(|(shard_number, _node_id)| { + *shard_number == tenant_shard_id.shard_number + }); + + if let Some(shard_idx) = shard_idx { + sharded.shards.remove(shard_idx); + } else { + tracing::warn!("Shard not found while handling detach") + } + } + ComputeHookTenant::Unsharded(_) => { + unreachable!("Detach of unsharded tenants is handled externally"); + } + } + } + /// Set one shard's location. If stripe size or shard count have changed, Self is reset /// and drops existing content. fn update( @@ -614,6 +645,36 @@ impl ComputeHook { self.notify_execute(maybe_send_result, tenant_shard_id, cancel) .await } + + /// Reflect a detach for a particular shard in the compute hook state. + /// + /// The goal is to avoid sending compute notifications with stale information (i.e. + /// including detached pageservers).
+ #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))] + pub(super) fn handle_detach( + &self, + tenant_shard_id: TenantShardId, + stripe_size: ShardStripeSize, + ) { + use std::collections::hash_map::Entry; + + let mut state_locked = self.state.lock().unwrap(); + match state_locked.entry(tenant_shard_id.tenant_id) { + Entry::Vacant(_) => { + tracing::warn!("Compute hook tenant not found for detach"); + } + Entry::Occupied(mut e) => { + let sharded = e.get().is_sharded(); + if !sharded { + e.remove(); + } else { + e.get_mut().remove_shard(tenant_shard_id, stripe_size); + } + + tracing::debug!("Compute hook handled shard detach"); + } + } + } } #[cfg(test)] diff --git a/storage_controller/src/http.rs b/storage_controller/src/http.rs index 1745bf5575..95e4a469ac 100644 --- a/storage_controller/src/http.rs +++ b/storage_controller/src/http.rs @@ -1849,7 +1849,7 @@ pub fn make_router( RequestName("v1_tenant_timeline"), ) }) - .post( + .put( "/v1/tenant/:tenant_id/timeline/:timeline_id/archival_config", |r| { tenant_service_handler( diff --git a/storage_controller/src/pageserver_client.rs b/storage_controller/src/pageserver_client.rs index 961a1f78dd..b19cbc4fa3 100644 --- a/storage_controller/src/pageserver_client.rs +++ b/storage_controller/src/pageserver_client.rs @@ -238,7 +238,7 @@ impl PageserverClient { ) -> Result<()> { measured_request!( "timeline_archival_config", - crate::metrics::Method::Post, + crate::metrics::Method::Put, &self.node_id_label, self.inner .timeline_archival_config(tenant_shard_id, timeline_id, req) diff --git a/storage_controller/src/reconciler.rs b/storage_controller/src/reconciler.rs index 83b7b2b4f2..750bcd7c01 100644 --- a/storage_controller/src/reconciler.rs +++ b/storage_controller/src/reconciler.rs @@ -820,6 +820,16 @@ impl Reconciler { self.location_config(&node, conf, None, false).await?; } + // The condition below identifies a detach. We must have no attached intent and + // must have been attached to something previously. Pass this information to + // the [`ComputeHook`] such that it can update its tenant-wide state. + if self.intent.attached.is_none() && !self.detach.is_empty() { + // TODO: Consider notifying control plane about detaches. This would avoid situations + // where the compute tries to start-up with a stale set of pageservers. + self.compute_hook + .handle_detach(self.tenant_shard_id, self.shard.stripe_size); + } + failpoint_support::sleep_millis_async!("sleep-on-reconcile-epilogue"); Ok(()) diff --git a/storage_controller/src/scheduler.rs b/storage_controller/src/scheduler.rs index deb5f27226..1cb1fb104d 100644 --- a/storage_controller/src/scheduler.rs +++ b/storage_controller/src/scheduler.rs @@ -2,7 +2,7 @@ use crate::{node::Node, tenant_shard::TenantShard}; use itertools::Itertools; use pageserver_api::models::PageserverUtilization; use serde::Serialize; -use std::collections::HashMap; +use std::{collections::HashMap, fmt::Debug}; use utils::{http::error::ApiError, id::NodeId}; /// Scenarios in which we cannot find a suitable location for a tenant shard @@ -27,7 +27,7 @@ pub enum MaySchedule { } #[derive(Serialize)] -struct SchedulerNode { +pub(crate) struct SchedulerNode { /// How many shards are currently scheduled on this node, via their [`crate::tenant_shard::IntentState`]. shard_count: usize, /// How many shards are currently attached on this node, via their [`crate::tenant_shard::IntentState`]. 
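The scoring structs introduced in the next hunk rely on derived PartialOrd/Ord, so comparison is lexicographic over the fields in declaration order: affinity_score decides first, then each subsequent field in turn, down to node_id as the final deterministic tie-breaker. SQL row values compare the same way, which gives a compact way to sanity-check the intended precedence; a toy illustration with made-up (affinity, utilization, node_id) triples:

-- Lower sorts first; the first differing field decides.
SELECT ROW(0, 9, 2) < ROW(1, 0, 1) AS lower_affinity_wins;      -- true
SELECT ROW(1, 3, 9) < ROW(1, 4, 0) AS utilization_breaks_ties;  -- true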
@@ -38,6 +38,137 @@ may_schedule: MaySchedule, } +pub(crate) trait NodeSchedulingScore: Debug + Ord + Copy + Sized { + fn generate( + node_id: &NodeId, + node: &mut SchedulerNode, + context: &ScheduleContext, + ) -> Option<Self>; + fn is_overloaded(&self) -> bool; + fn node_id(&self) -> NodeId; +} + +pub(crate) trait ShardTag { + type Score: NodeSchedulingScore; +} + +pub(crate) struct AttachedShardTag {} +impl ShardTag for AttachedShardTag { + type Score = NodeAttachmentSchedulingScore; +} + +pub(crate) struct SecondaryShardTag {} +impl ShardTag for SecondaryShardTag { + type Score = NodeSecondarySchedulingScore; +} + +/// Scheduling score of a given node for shard attachments. +/// Lower scores indicate more suitable nodes. +/// Ordering is given by member declaration order (top to bottom). +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] +pub(crate) struct NodeAttachmentSchedulingScore { + /// The number of shards belonging to the tenant currently being + /// scheduled that are attached to this node. + affinity_score: AffinityScore, + /// Size of [`ScheduleContext::attached_nodes`] for the current node. + /// This normally tracks the number of attached shards belonging to the + /// tenant being scheduled that are already on this node. + attached_shards_in_context: usize, + /// Utilisation score that combines shard count and disk utilisation + utilization_score: u64, + /// Total number of shards attached to this node. When nodes have identical utilisation, this + /// acts as an anti-affinity between attached shards. + total_attached_shard_count: usize, + /// Convenience to make selection deterministic in tests and empty systems + node_id: NodeId, +} + +impl NodeSchedulingScore for NodeAttachmentSchedulingScore { + fn generate( + node_id: &NodeId, + node: &mut SchedulerNode, + context: &ScheduleContext, + ) -> Option<Self> { + let utilization = match &mut node.may_schedule { + MaySchedule::Yes(u) => u, + MaySchedule::No => { + return None; + } + }; + + Some(Self { + affinity_score: context + .nodes + .get(node_id) + .copied() + .unwrap_or(AffinityScore::FREE), + attached_shards_in_context: context.attached_nodes.get(node_id).copied().unwrap_or(0), + utilization_score: utilization.cached_score(), + total_attached_shard_count: node.attached_shard_count, + node_id: *node_id, + }) + } + + fn is_overloaded(&self) -> bool { + PageserverUtilization::is_overloaded(self.utilization_score) + } + + fn node_id(&self) -> NodeId { + self.node_id + } +} + +/// Scheduling score of a given node for shard secondaries. +/// Lower scores indicate more suitable nodes. +/// Ordering is given by member declaration order (top to bottom). +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] +pub(crate) struct NodeSecondarySchedulingScore { + /// The number of shards belonging to the tenant currently being + /// scheduled that are attached to this node. + affinity_score: AffinityScore, + /// Utilisation score that combines shard count and disk utilisation + utilization_score: u64, + /// Total number of shards attached to this node. When nodes have identical utilisation, this + /// acts as an anti-affinity between attached shards.
+
+/// Scheduling score of a given node for shard attachments.
+/// Lower scores indicate more suitable nodes.
+/// Ordering is given by member declaration order (top to bottom).
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
+pub(crate) struct NodeAttachmentSchedulingScore {
+    /// The number of shards belonging to the tenant currently being
+    /// scheduled that are attached to this node.
+    affinity_score: AffinityScore,
+    /// Size of [`ScheduleContext::attached_nodes`] for the current node.
+    /// This normally tracks the number of attached shards belonging to the
+    /// tenant being scheduled that are already on this node.
+    attached_shards_in_context: usize,
+    /// Utilisation score that combines shard count and disk utilisation
+    utilization_score: u64,
+    /// Total number of shards attached to this node. When nodes have identical utilisation, this
+    /// acts as an anti-affinity between attached shards.
+    total_attached_shard_count: usize,
+    /// Convenience to make selection deterministic in tests and empty systems
+    node_id: NodeId,
+}
+
+impl NodeSchedulingScore for NodeAttachmentSchedulingScore {
+    fn generate(
+        node_id: &NodeId,
+        node: &mut SchedulerNode,
+        context: &ScheduleContext,
+    ) -> Option<Self> {
+        let utilization = match &mut node.may_schedule {
+            MaySchedule::Yes(u) => u,
+            MaySchedule::No => {
+                return None;
+            }
+        };
+
+        Some(Self {
+            affinity_score: context
+                .nodes
+                .get(node_id)
+                .copied()
+                .unwrap_or(AffinityScore::FREE),
+            attached_shards_in_context: context.attached_nodes.get(node_id).copied().unwrap_or(0),
+            utilization_score: utilization.cached_score(),
+            total_attached_shard_count: node.attached_shard_count,
+            node_id: *node_id,
+        })
+    }
+
+    fn is_overloaded(&self) -> bool {
+        PageserverUtilization::is_overloaded(self.utilization_score)
+    }
+
+    fn node_id(&self) -> NodeId {
+        self.node_id
+    }
+}
+
+/// Scheduling score of a given node for shard secondaries.
+/// Lower scores indicate more suitable nodes.
+/// Ordering is given by member declaration order (top to bottom).
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
+pub(crate) struct NodeSecondarySchedulingScore {
+    /// The number of shards belonging to the tenant currently being
+    /// scheduled that are attached to this node.
+    affinity_score: AffinityScore,
+    /// Utilisation score that combines shard count and disk utilisation
+    utilization_score: u64,
+    /// Total number of shards attached to this node. When nodes have identical utilisation, this
+    /// acts as an anti-affinity between attached shards.
+    total_attached_shard_count: usize,
+    /// Convenience to make selection deterministic in tests and empty systems
+    node_id: NodeId,
+}
+
+impl NodeSchedulingScore for NodeSecondarySchedulingScore {
+    fn generate(
+        node_id: &NodeId,
+        node: &mut SchedulerNode,
+        context: &ScheduleContext,
+    ) -> Option<Self> {
+        let utilization = match &mut node.may_schedule {
+            MaySchedule::Yes(u) => u,
+            MaySchedule::No => {
+                return None;
+            }
+        };
+
+        Some(Self {
+            affinity_score: context
+                .nodes
+                .get(node_id)
+                .copied()
+                .unwrap_or(AffinityScore::FREE),
+            utilization_score: utilization.cached_score(),
+            total_attached_shard_count: node.attached_shard_count,
+            node_id: *node_id,
+        })
+    }
+
+    fn is_overloaded(&self) -> bool {
+        PageserverUtilization::is_overloaded(self.utilization_score)
+    }
+
+    fn node_id(&self) -> NodeId {
+        self.node_id
+    }
+}
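Both score structs rely on `#[derive(PartialOrd, Ord)]`, which compares fields lexicographically in declaration order, so the doc comment "Ordering is given by member declaration order (top to bottom)" is exactly what the derive provides. A small demonstration of that property with a toy score (not the real types):

```rust
// Field order is the sort order: affinity first, then utilization, then node id.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
struct ToyScore {
    affinity: u64,
    utilization: u64,
    node_id: u64,
}

fn main() {
    let mut scores = vec![
        ToyScore { affinity: 1, utilization: 10, node_id: 1 },
        ToyScore { affinity: 0, utilization: 99, node_id: 2 },
        ToyScore { affinity: 0, utilization: 50, node_id: 3 },
    ];
    // Like Scheduler::schedule_shard: sort ascending, lowest score wins.
    scores.sort();
    // Affinity dominates: node 3 wins despite node 1 having lower utilization.
    assert_eq!(scores.first().unwrap().node_id, 3);
    println!("winner: {:?}", scores[0]);
}
```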
 
 impl PartialEq for SchedulerNode {
     fn eq(&self, other: &Self) -> bool {
         let may_schedule_matches = matches!(
@@ -406,6 +537,28 @@ impl Scheduler {
         node.and_then(|(node_id, may_schedule)| if may_schedule { Some(node_id) } else { None })
     }
 
+    /// Compute a scheduling score for each node that the scheduler knows of
+    /// minus a set of hard excluded nodes.
+    fn compute_node_scores<Score>(
+        &mut self,
+        hard_exclude: &[NodeId],
+        context: &ScheduleContext,
+    ) -> Vec<Score>
+    where
+        Score: NodeSchedulingScore,
+    {
+        self.nodes
+            .iter_mut()
+            .filter_map(|(k, v)| {
+                if hard_exclude.contains(k) {
+                    None
+                } else {
+                    Score::generate(k, v, context)
+                }
+            })
+            .collect()
+    }
+
     /// hard_exclude: it is forbidden to use nodes in this list, typically because they
     /// are already in use by this shard -- we use this to avoid picking the same node
     /// as both attached and secondary location. This is a hard constraint: if we cannot
@@ -415,7 +568,7 @@ impl Scheduler {
     /// to their anti-affinity score. We use this to prefer to avoid placing shards in
     /// the same tenant on the same node. This is a soft constraint: the context will never
     /// cause us to fail to schedule a shard.
-    pub(crate) fn schedule_shard(
+    pub(crate) fn schedule_shard<Tag: ShardTag>(
         &mut self,
         hard_exclude: &[NodeId],
         context: &ScheduleContext,
@@ -424,20 +577,7 @@ impl Scheduler {
             return Err(ScheduleError::NoPageservers);
         }
 
-        let mut scores: Vec<(NodeId, AffinityScore, u64, usize)> = self
-            .nodes
-            .iter_mut()
-            .filter_map(|(k, v)| match &mut v.may_schedule {
-                MaySchedule::No => None,
-                MaySchedule::Yes(_) if hard_exclude.contains(k) => None,
-                MaySchedule::Yes(utilization) => Some((
-                    *k,
-                    context.nodes.get(k).copied().unwrap_or(AffinityScore::FREE),
-                    utilization.cached_score(),
-                    v.attached_shard_count,
-                )),
-            })
-            .collect();
+        let mut scores = self.compute_node_scores::<Tag::Score>(hard_exclude, context);
 
         // Exclude nodes whose utilization is critically high, if there are alternatives available. This will
         // cause us to violate affinity rules if it is necessary to avoid critically overloading nodes: for example
        // we may place shards in the same tenant together on the same pageserver if all other pageservers are
         // overloaded.
         let non_overloaded_scores = scores
             .iter()
-            .filter(|i| !PageserverUtilization::is_overloaded(i.2))
+            .filter(|i| !i.is_overloaded())
             .copied()
             .collect::<Vec<_>>();
         if !non_overloaded_scores.is_empty() {
             scores = non_overloaded_scores;
         }
 
-        // Sort by, in order of precedence:
-        //  1st: Affinity score. We should never pick a higher-score node if a lower-score node is available
-        //  2nd: Utilization score (this combines shard count and disk utilization)
-        //  3rd: Attached shard count. When nodes have identical utilization (e.g. when populating some
-        //       empty nodes), this acts as an anti-affinity between attached shards.
-        //  4th: Node ID. This is a convenience to make selection deterministic in tests and empty systems.
-        scores.sort_by_key(|i| (i.1, i.2, i.3, i.0));
+        // Sort the nodes by score. The one with the lowest scores will be the preferred node.
+        // Refer to [`NodeAttachmentSchedulingScore`] for attached locations and
+        // [`NodeSecondarySchedulingScore`] for secondary locations to understand how the nodes
+        // are ranked.
+        scores.sort();
 
         if scores.is_empty() {
             // After applying constraints, no pageservers were left.
@@ -481,12 +619,12 @@ impl Scheduler {
         }
 
         // Lowest score wins
-        let node_id = scores.first().unwrap().0;
+        let node_id = scores.first().unwrap().node_id();
 
         if !matches!(context.mode, ScheduleMode::Speculative) {
             tracing::info!(
                 "scheduler selected node {node_id} (eligible nodes {:?}, hard exclude: {hard_exclude:?}, soft exclude: {context:?})",
-                scores.iter().map(|i| i.0 .0).collect::<Vec<_>>()
+                scores.iter().map(|i| i.node_id().0).collect::<Vec<_>>()
             );
         }
 
@@ -556,9 +694,9 @@ mod tests {
 
         let context = ScheduleContext::default();
 
-        let scheduled = scheduler.schedule_shard(&[], &context)?;
+        let scheduled = scheduler.schedule_shard::<AttachedShardTag>(&[], &context)?;
         t1_intent.set_attached(&mut scheduler, Some(scheduled));
-        let scheduled = scheduler.schedule_shard(&[], &context)?;
+        let scheduled = scheduler.schedule_shard::<AttachedShardTag>(&[], &context)?;
         t2_intent.set_attached(&mut scheduler, Some(scheduled));
 
         assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 1);
@@ -567,7 +705,8 @@ mod tests {
         assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 1);
         assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 1);
 
-        let scheduled = scheduler.schedule_shard(&t1_intent.all_pageservers(), &context)?;
+        let scheduled =
+            scheduler.schedule_shard::<SecondaryShardTag>(&t1_intent.all_pageservers(), &context)?;
         t1_intent.push_secondary(&mut scheduler, scheduled);
 
         assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 1);
@@ -621,7 +760,9 @@ mod tests {
         scheduler: &mut Scheduler,
         context: &ScheduleContext,
     ) {
-        let scheduled = scheduler.schedule_shard(&[], context).unwrap();
+        let scheduled = scheduler
+            .schedule_shard::<AttachedShardTag>(&[], context)
+            .unwrap();
         let mut intent = IntentState::new();
         intent.set_attached(scheduler, Some(scheduled));
         scheduled_intents.push(intent);
diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs
index 957f633feb..5555505b81 100644
--- a/storage_controller/src/service.rs
+++ b/storage_controller/src/service.rs
@@ -26,7 +26,7 @@ use crate::{
         ShardGenerationState, TenantFilter,
     },
     reconciler::{ReconcileError, ReconcileUnits, ReconcilerConfig, ReconcilerConfigBuilder},
-    scheduler::{MaySchedule, ScheduleContext, ScheduleError, ScheduleMode},
+    scheduler::{AttachedShardTag, MaySchedule, ScheduleContext, ScheduleError, ScheduleMode},
     tenant_shard::{
         MigrateAttachment, ReconcileNeeded, ReconcilerStatus, ScheduleOptimization,
         ScheduleOptimizationAction,
@@ -2629,7 +2629,8 @@ impl Service {
         let scheduler = &mut locked.scheduler;
         // Right now we only perform the operation on a single node without parallelization
         // TODO fan out the operation to multiple nodes for better performance
-        let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
+        let node_id =
+            scheduler.schedule_shard::<AttachedShardTag>(&[], &ScheduleContext::default())?;
         let node = locked
             .nodes
             .get(&node_id)
@@ -2815,7 +2816,8 @@ impl Service {
         // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
         // was attached, just has to be able to see the S3 content)
-        let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
+        let node_id =
+            scheduler.schedule_shard::<AttachedShardTag>(&[], &ScheduleContext::default())?;
         let node = nodes
             .get(&node_id)
             .expect("Pageservers may not be deleted while lock is active");
diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs
index cdb0633e2b..1f5eb423be 100644
--- a/storage_controller/src/tenant_shard.rs
+++ b/storage_controller/src/tenant_shard.rs
@@ -8,7 +8,10 @@ use crate::{
     metrics::{self, ReconcileCompleteLabelGroup, ReconcileOutcome},
     persistence::TenantShardPersistence,
     reconciler::{ReconcileUnits, ReconcilerConfig},
-    scheduler::{AffinityScore, MaySchedule, RefCountUpdate, ScheduleContext},
+    scheduler::{
+        AffinityScore, AttachedShardTag, MaySchedule, RefCountUpdate, ScheduleContext,
+        SecondaryShardTag,
+    },
     service::ReconcileResultRequest,
 };
 use pageserver_api::controller_api::{
@@ -335,19 +338,19 @@ pub(crate) enum ReconcileWaitError {
     Failed(TenantShardId, Arc<ReconcileError>),
 }
 
-#[derive(Eq, PartialEq, Debug)]
+#[derive(Eq, PartialEq, Debug, Clone)]
 pub(crate) struct ReplaceSecondary {
     old_node_id: NodeId,
     new_node_id: NodeId,
 }
 
-#[derive(Eq, PartialEq, Debug)]
+#[derive(Eq, PartialEq, Debug, Clone)]
 pub(crate) struct MigrateAttachment {
     pub(crate) old_attached_node_id: NodeId,
     pub(crate) new_attached_node_id: NodeId,
 }
 
-#[derive(Eq, PartialEq, Debug)]
+#[derive(Eq, PartialEq, Debug, Clone)]
 pub(crate) enum ScheduleOptimizationAction {
     // Replace one of our secondary locations with a different node
     ReplaceSecondary(ReplaceSecondary),
@@ -355,7 +358,7 @@ pub(crate) enum ScheduleOptimizationAction {
     MigrateAttachment(MigrateAttachment),
 }
 
-#[derive(Eq, PartialEq, Debug)]
+#[derive(Eq, PartialEq, Debug, Clone)]
 pub(crate) struct ScheduleOptimization {
     // What was the reconcile sequence when we generated this optimization?  The optimization
     // should only be applied if the shard's sequence is still at this value, in case other changes
@@ -537,7 +540,8 @@ impl TenantShard {
             Ok((true, promote_secondary))
         } else {
             // Pick a fresh node: either we had no secondaries or none were schedulable
-            let node_id = scheduler.schedule_shard(&self.intent.secondary, context)?;
+            let node_id =
+                scheduler.schedule_shard::<AttachedShardTag>(&self.intent.secondary, context)?;
             tracing::debug!("Selected {} as attached", node_id);
             self.intent.set_attached(scheduler, Some(node_id));
             Ok((true, node_id))
@@ -613,7 +617,8 @@ impl TenantShard {
             let mut used_pageservers = vec![attached_node_id];
             while self.intent.secondary.len() < secondary_count {
-                let node_id = scheduler.schedule_shard(&used_pageservers, context)?;
+                let node_id = scheduler
+                    .schedule_shard::<SecondaryShardTag>(&used_pageservers, context)?;
                 self.intent.push_secondary(scheduler, node_id);
                 used_pageservers.push(node_id);
                 modified = true;
@@ -626,7 +631,7 @@ impl TenantShard {
                 modified = true;
             } else if self.intent.secondary.is_empty() {
                 // Populate secondary by scheduling a fresh node
-                let node_id = scheduler.schedule_shard(&[], context)?;
+                let node_id = scheduler.schedule_shard::<SecondaryShardTag>(&[], context)?;
                 self.intent.push_secondary(scheduler, node_id);
                 modified = true;
             }
@@ -803,9 +808,10 @@ impl TenantShard {
             // Let the scheduler suggest a node, where it would put us if we were scheduling afresh
             // This implicitly limits the choice to nodes that are available, and prefers nodes
             // with lower utilization.
-            let Ok(candidate_node) =
-                scheduler.schedule_shard(&self.intent.all_pageservers(), schedule_context)
-            else {
+            let Ok(candidate_node) = scheduler.schedule_shard::<SecondaryShardTag>(
+                &self.intent.all_pageservers(),
+                schedule_context,
+            ) else {
                 // A scheduling error means we have no possible candidate replacements
                 continue;
             };
@@ -1333,6 +1339,8 @@ impl TenantShard {
 
 #[cfg(test)]
 pub(crate) mod tests {
+    use std::{cell::RefCell, rc::Rc};
+
     use pageserver_api::{
         controller_api::NodeAvailability,
         shard::{ShardCount, ShardNumber},
@@ -1637,12 +1645,14 @@ pub(crate) mod tests {
 
     // Optimize til quiescent: this emulates what Service::optimize_all does, when
     // called repeatedly in the background.
+    // Returns the applied optimizations
     fn optimize_til_idle(
         nodes: &HashMap<NodeId, Node>,
         scheduler: &mut Scheduler,
         shards: &mut [TenantShard],
-    ) {
+    ) -> Vec<ScheduleOptimization> {
         let mut loop_n = 0;
+        let mut optimizations = Vec::default();
         loop {
             let mut schedule_context = ScheduleContext::default();
             let mut any_changed = false;
@@ -1657,6 +1667,7 @@ pub(crate) mod tests {
             for shard in shards.iter_mut() {
                 let optimization = shard.optimize_attachment(nodes, &schedule_context);
                 if let Some(optimization) = optimization {
+                    optimizations.push(optimization.clone());
                     shard.apply_optimization(scheduler, optimization);
                     any_changed = true;
                     break;
@@ -1664,6 +1675,7 @@ pub(crate) mod tests {
 
                 let optimization = shard.optimize_secondary(scheduler, &schedule_context);
                 if let Some(optimization) = optimization {
+                    optimizations.push(optimization.clone());
                     shard.apply_optimization(scheduler, optimization);
                     any_changed = true;
                     break;
@@ -1678,6 +1690,8 @@ pub(crate) mod tests {
             loop_n += 1;
             assert!(loop_n < 1000);
         }
+
+        optimizations
     }
 
     /// Test the balancing behavior of shard scheduling: that it achieves a balance, and
@@ -1730,4 +1744,48 @@
 
         Ok(())
     }
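The test added below interleaves the shards of two tenants into shared `ScheduleContext`s and asserts that the optimizer has nothing left to do. As a rough intuition for why interleaved scheduling comes out balanced, here is a toy model of context-driven anti-affinity; `ToyContext` and its `avoid`/`score` methods are invented for this sketch and are not the controller's real API:

```rust
use std::collections::HashMap;

// Toy stand-in for a schedule context: counts how many shards of the same
// tenant already landed on each node.
#[derive(Default)]
struct ToyContext {
    affinity: HashMap<u64, u64>,
}

impl ToyContext {
    // Record that this tenant now occupies `node`, making it less attractive
    // for the tenant's remaining shards.
    fn avoid(&mut self, node: u64) {
        *self.affinity.entry(node).or_default() += 1;
    }

    fn score(&self, node: u64) -> u64 {
        self.affinity.get(&node).copied().unwrap_or(0)
    }
}

fn main() {
    let nodes = [1u64, 2];
    let mut ctx = ToyContext::default();
    let mut placements = Vec::new();
    for _shard in 0..4 {
        // Pick the node with the lowest affinity score, breaking ties by node id.
        let node = *nodes.iter().min_by_key(|n| (ctx.score(**n), **n)).unwrap();
        ctx.avoid(node);
        placements.push(node);
    }
    // One tenant's shards alternate between the two nodes: already balanced,
    // so an optimizer pass has nothing to improve.
    assert_eq!(placements, vec![1, 2, 1, 2]);
}
```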
+
+    /// Test that initial shard scheduling is optimal. By optimal we mean
+    /// that the optimizer cannot find a way to improve it.
+    ///
+    /// This test is an example of the scheduling issue described in
+    /// https://github.com/neondatabase/neon/issues/8969
+    #[test]
+    fn initial_scheduling_is_optimal() -> anyhow::Result<()> {
+        use itertools::Itertools;
+
+        let nodes = make_test_nodes(2);
+
+        let mut scheduler = Scheduler::new([].iter());
+        scheduler.node_upsert(nodes.get(&NodeId(1)).unwrap());
+        scheduler.node_upsert(nodes.get(&NodeId(2)).unwrap());
+
+        let mut a = make_test_tenant(PlacementPolicy::Attached(1), ShardCount::new(4));
+        let a_context = Rc::new(RefCell::new(ScheduleContext::default()));
+
+        let mut b = make_test_tenant(PlacementPolicy::Attached(1), ShardCount::new(4));
+        let b_context = Rc::new(RefCell::new(ScheduleContext::default()));
+
+        let a_shards_with_context = a.iter_mut().map(|shard| (shard, a_context.clone()));
+        let b_shards_with_context = b.iter_mut().map(|shard| (shard, b_context.clone()));
+
+        let schedule_order = a_shards_with_context.interleave(b_shards_with_context);
+
+        for (shard, context) in schedule_order {
+            let context = &mut *context.borrow_mut();
+            shard.schedule(&mut scheduler, context).unwrap();
+        }
+
+        let applied_to_a = optimize_til_idle(&nodes, &mut scheduler, &mut a);
+        assert_eq!(applied_to_a, vec![]);
+
+        let applied_to_b = optimize_til_idle(&nodes, &mut scheduler, &mut b);
+        assert_eq!(applied_to_b, vec![]);
+
+        for shard in a.iter_mut().chain(b.iter_mut()) {
+            shard.intent.clear(&mut scheduler);
+        }
+
+        Ok(())
+    }
 }
diff --git a/storage_scrubber/src/checks.rs b/storage_scrubber/src/checks.rs
index de6918b3da..525f412b56 100644
--- a/storage_scrubber/src/checks.rs
+++ b/storage_scrubber/src/checks.rs
@@ -1,13 +1,12 @@
 use std::collections::{HashMap, HashSet};
 
-use anyhow::Context;
 use itertools::Itertools;
 use pageserver::tenant::checks::check_valid_layermap;
 use pageserver::tenant::layer_map::LayerMap;
 use pageserver::tenant::remote_timeline_client::index::LayerFileMetadata;
 use pageserver_api::shard::ShardIndex;
 use tokio_util::sync::CancellationToken;
-use tracing::{error, info, warn};
+use tracing::{info, warn};
 use utils::generation::Generation;
 use utils::id::TimelineId;
 
@@ -29,9 +28,8 @@ pub(crate) struct TimelineAnalysis {
     /// yet.
     pub(crate) warnings: Vec<String>,
 
-    /// Keys not referenced in metadata: candidates for removal, but NOT NECESSARILY: beware
-    /// of races between reading the metadata and reading the objects.
-    pub(crate) garbage_keys: Vec<String>,
+    /// Objects whose keys were not recognized at all, i.e. not layer files, not indices, and not initdb archive.
+    pub(crate) unknown_keys: Vec<String>,
 }
 
 impl TimelineAnalysis {
@@ -39,7 +37,7 @@ impl TimelineAnalysis {
         Self {
             errors: Vec::new(),
             warnings: Vec::new(),
-            garbage_keys: Vec::new(),
+            unknown_keys: Vec::new(),
         }
     }
 
@@ -59,7 +57,7 @@ pub(crate) async fn branch_cleanup_and_check_errors(
 ) -> TimelineAnalysis {
     let mut result = TimelineAnalysis::new();
 
-    info!("Checking timeline {id}");
+    info!("Checking timeline");
 
     if let Some(s3_active_branch) = s3_active_branch {
         info!(
@@ -80,7 +78,7 @@
     match s3_data {
         Some(s3_data) => {
             result
-                .garbage_keys
+                .unknown_keys
                 .extend(s3_data.unknown_keys.into_iter().map(|k| k.key.to_string()));
 
             match s3_data.blob_data {
@@ -204,10 +202,10 @@
         warn!("Timeline metadata warnings: {0:?}", result.warnings);
     }
 
-    if !result.garbage_keys.is_empty() {
-        error!(
-            "The following keys should be removed from S3: {0:?}",
-            result.garbage_keys
+    if !result.unknown_keys.is_empty() {
+        warn!(
+            "The following keys are not recognized: {0:?}",
+            result.unknown_keys
         )
     }
 
@@ -294,10 +292,10 @@ impl TenantObjectListing {
 pub(crate) struct RemoteTimelineBlobData {
     pub(crate) blob_data: BlobDataParseResult,
 
-    // Index objects that were not used when loading `blob_data`, e.g. those from old generations
+    /// Index objects that were not used when loading `blob_data`, e.g. those from old generations
     pub(crate) unused_index_keys: Vec<ListingObject>,
 
-    // Objects whose keys were not recognized at all, i.e. not layer files, not indices
+    /// Objects whose keys were not recognized at all, i.e. not layer files, not indices
    pub(crate) unknown_keys: Vec<ListingObject>,
 }
 
@@ -329,11 +327,54 @@ pub(crate) fn parse_layer_object_name(name: &str) -> Result<(LayerName, Generation), String> {
     }
 }
 
+/// Note:
+/// Since we do not guarantee the order of the listing, we could list layer keys right before
+/// pageserver `RemoteTimelineClient` deletes the layer files and then the index.
+/// In the rare case, this would give back a transient error where the index key is missing.
+///
+/// To avoid generating false positives, we try streaming the listing for a second time.
 pub(crate) async fn list_timeline_blobs(
     remote_client: &GenericRemoteStorage,
     id: TenantShardTimelineId,
     root_target: &RootTarget,
 ) -> anyhow::Result<RemoteTimelineBlobData> {
+    let res = list_timeline_blobs_impl(remote_client, id, root_target).await?;
+    match res {
+        ListTimelineBlobsResult::Ready(data) => Ok(data),
+        ListTimelineBlobsResult::MissingIndexPart(_) => {
+            // Retry if index is missing.
+            let data = list_timeline_blobs_impl(remote_client, id, root_target)
+                .await?
+                .into_data();
+            Ok(data)
+        }
+    }
+}
+
+enum ListTimelineBlobsResult {
+    /// Blob data is ready to be interpreted.
+    Ready(RemoteTimelineBlobData),
+    /// The listing contains layer files but is missing [`IndexPart`].
+    MissingIndexPart(RemoteTimelineBlobData),
+}
+
+impl ListTimelineBlobsResult {
+    /// Get the inner blob data regardless of the status.
+    pub fn into_data(self) -> RemoteTimelineBlobData {
+        match self {
+            ListTimelineBlobsResult::Ready(data) => data,
+            ListTimelineBlobsResult::MissingIndexPart(data) => data,
+        }
+    }
+}
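The `list_timeline_blobs` wrapper above retries exactly once when the first listing looks like it raced with a timeline deletion, and accepts whatever the second attempt returns. A self-contained sketch of that retry-once shape, with hypothetical `Attempt`/`list_with_retry` names standing in for `ListTimelineBlobsResult`/`list_timeline_blobs`:

```rust
// Outcome of one listing attempt: complete, or observed mid-deletion.
enum Attempt<T> {
    Ready(T),
    MissingIndex(T),
}

impl<T> Attempt<T> {
    fn into_data(self) -> T {
        match self {
            Attempt::Ready(d) | Attempt::MissingIndex(d) => d,
        }
    }
}

// Retry once when the first attempt looks transient; on the second attempt,
// accept whatever we saw.
fn list_with_retry<T>(mut attempt: impl FnMut() -> Attempt<T>) -> T {
    match attempt() {
        Attempt::Ready(d) => d,
        Attempt::MissingIndex(_) => attempt().into_data(),
    }
}

fn main() {
    let mut calls = 0;
    let data = list_with_retry(|| {
        calls += 1;
        if calls == 1 {
            // First listing raced with a deletion: layers but no index.
            Attempt::MissingIndex(vec!["layer-1"])
        } else {
            Attempt::Ready(vec!["layer-1", "index_part.json"])
        }
    });
    assert_eq!(data, vec!["layer-1", "index_part.json"]);
    println!("succeeded after {calls} attempts");
}
```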
+
+/// Returns [`ListTimelineBlobsResult::MissingIndexPart`] if blob data has layer files
+/// but is missing [`IndexPart`], otherwise returns [`ListTimelineBlobsResult::Ready`].
+async fn list_timeline_blobs_impl(
+    remote_client: &GenericRemoteStorage,
+    id: TenantShardTimelineId,
+    root_target: &RootTarget,
+) -> anyhow::Result<ListTimelineBlobsResult> {
     let mut s3_layers = HashSet::new();
 
     let mut errors = Vec::new();
@@ -375,30 +416,28 @@
                     s3_layers.insert((new_layer, gen));
                 }
                 Err(e) => {
-                    tracing::info!("Error parsing key {maybe_layer_name}");
-                    errors.push(
-                        format!("S3 list response got an object with key {key} that is not a layer name: {e}"),
-                    );
+                    tracing::info!("Error parsing {maybe_layer_name} as layer name: {e}");
                     unknown_keys.push(obj);
                 }
             },
             None => {
-                tracing::warn!("Unknown key {key}");
-                errors.push(format!("S3 list response got an object with odd key {key}"));
+                tracing::info!("S3 listed an unknown key: {key}");
+                unknown_keys.push(obj);
             }
         }
     }
 
-    if index_part_keys.is_empty() && s3_layers.is_empty() && initdb_archive {
-        tracing::debug!(
-            "Timeline is empty apart from initdb archive: expected post-deletion state."
-        );
-        return Ok(RemoteTimelineBlobData {
+    if index_part_keys.is_empty() && s3_layers.is_empty() {
+        tracing::debug!("Timeline is empty: expected post-deletion state.");
+        if initdb_archive {
+            tracing::info!("Timeline is post deletion but initdb archive is still present.");
+        }
+
+        return Ok(ListTimelineBlobsResult::Ready(RemoteTimelineBlobData {
             blob_data: BlobDataParseResult::Relic,
             unused_index_keys: index_part_keys,
-            unknown_keys: Vec::new(),
-        });
+            unknown_keys,
+        }));
     }
 
     // Choose the index_part with the highest generation
@@ -424,19 +463,43 @@
     match index_part_object.as_ref() {
         Some(selected) => index_part_keys.retain(|k| k != selected),
         None => {
-            errors.push("S3 list response got no index_part.json file".to_string());
+            // It is possible that the branch gets deleted after we have listed the layer files,
+            // so the index file is no longer present in the listing.
+            errors.push(
+                "S3 list response got no index_part.json file but still has layer files"
+                    .to_string(),
+            );
+            return Ok(ListTimelineBlobsResult::MissingIndexPart(
+                RemoteTimelineBlobData {
+                    blob_data: BlobDataParseResult::Incorrect { errors, s3_layers },
+                    unused_index_keys: index_part_keys,
+                    unknown_keys,
+                },
+            ));
         }
     }
 
     if let Some(index_part_object_key) = index_part_object.as_ref() {
         let index_part_bytes =
-            download_object_with_retries(remote_client, &index_part_object_key.key)
-                .await
-                .context("index_part.json download")?;
+            match download_object_with_retries(remote_client, &index_part_object_key.key).await {
+                Ok(index_part_bytes) => index_part_bytes,
+                Err(e) => {
+                    // It is possible that the branch gets deleted between the moment we list
+                    // the objects and the moment we download the index part file.
+                    errors.push(format!("failed to download index_part.json: {e}"));
+                    return Ok(ListTimelineBlobsResult::MissingIndexPart(
+                        RemoteTimelineBlobData {
+                            blob_data: BlobDataParseResult::Incorrect { errors, s3_layers },
+                            unused_index_keys: index_part_keys,
+                            unknown_keys,
+                        },
+                    ));
+                }
+            };
 
         match serde_json::from_slice(&index_part_bytes) {
             Ok(index_part) => {
-                return Ok(RemoteTimelineBlobData {
+                return Ok(ListTimelineBlobsResult::Ready(RemoteTimelineBlobData {
                     blob_data: BlobDataParseResult::Parsed {
                         index_part: Box::new(index_part),
                         index_part_generation,
@@ -444,7 +507,7 @@
                     },
                     unused_index_keys: index_part_keys,
                     unknown_keys,
-                })
+                }))
             }
             Err(index_parse_error) => errors.push(format!(
                 "index_part.json body parsing error: {index_parse_error}"
@@ -458,9 +521,9 @@
         );
     }
 
-    Ok(RemoteTimelineBlobData {
+    Ok(ListTimelineBlobsResult::Ready(RemoteTimelineBlobData {
         blob_data: BlobDataParseResult::Incorrect { errors, s3_layers },
         unused_index_keys: index_part_keys,
         unknown_keys,
-    })
+    }))
 }
diff --git a/storage_scrubber/src/main.rs b/storage_scrubber/src/main.rs
index ee133e2e58..ee816534c6 100644
--- a/storage_scrubber/src/main.rs
+++ b/storage_scrubber/src/main.rs
@@ -41,6 +41,10 @@ struct Cli {
     #[arg(long)]
     /// JWT token for authenticating with storage controller. Requires scope 'scrubber' or 'admin'.
     controller_jwt: Option<String>,
+
+    /// If set to true, the scrubber will exit with an error code on fatal errors.
+    #[arg(long, default_value_t = false)]
+    exit_code: bool,
 }
 
 #[derive(Subcommand, Debug)]
@@ -203,6 +207,7 @@ async fn main() -> anyhow::Result<()> {
                 tenant_ids,
                 json,
                 post_to_storcon,
+                cli.exit_code,
             )
             .await
         }
@@ -269,6 +274,7 @@ async fn main() -> anyhow::Result<()> {
                 gc_min_age,
                 gc_mode,
                 post_to_storcon,
+                cli.exit_code,
             )
             .await
         }
@@ -284,6 +290,7 @@ pub async fn run_cron_job(
     gc_min_age: humantime::Duration,
     gc_mode: GcMode,
     post_to_storcon: bool,
+    exit_code: bool,
 ) -> anyhow::Result<()> {
     tracing::info!(%gc_min_age, %gc_mode, "Running pageserver-physical-gc");
     pageserver_physical_gc_cmd(
@@ -301,6 +308,7 @@ pub async fn run_cron_job(
         Vec::new(),
         true,
         post_to_storcon,
+        exit_code,
     )
     .await?;
 
@@ -349,6 +357,7 @@ pub async fn scan_pageserver_metadata_cmd(
     tenant_shard_ids: Vec<TenantShardId>,
     json: bool,
     post_to_storcon: bool,
+    exit_code: bool,
 ) -> anyhow::Result<()> {
     if controller_client.is_none() && post_to_storcon {
         return Err(anyhow!("Posting pageserver scan health status to storage controller requires `--controller-api` and `--controller-jwt` to run"));
@@ -380,6 +389,9 @@ pub async fn scan_pageserver_metadata_cmd(
     if summary.is_fatal() {
         tracing::error!("Fatal scrub errors detected");
+        if exit_code {
+            std::process::exit(1);
+        }
     } else if summary.is_empty() {
         // Strictly speaking an empty bucket is a valid bucket, but if someone ran the
         // scrubber they were likely expecting to scan something, and if we see no timelines
@@ -391,6 +403,9 @@
                 .prefix_in_bucket
                 .unwrap_or("".to_string())
         );
+        if exit_code {
+            std::process::exit(1);
+        }
     }
 
     Ok(())
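The `--exit-code` flag added above follows a common CLI pattern: keep logging unchanged, but optionally surface fatal findings to wrappers such as cron or CI through the process exit status. A minimal sketch of the pattern, assuming clap with the `derive` feature; the names are illustrative:

```rust
use clap::Parser;

/// Minimal sketch of an opt-in non-zero exit on fatal findings.
#[derive(Parser)]
struct Cli {
    /// If set, exit with an error code on fatal errors.
    #[arg(long, default_value_t = false)]
    exit_code: bool,
}

fn main() {
    let cli = Cli::parse();
    let fatal_errors_detected = true; // stand-in for the scrub summary
    if fatal_errors_detected {
        eprintln!("fatal scrub errors detected");
        if cli.exit_code {
            // Make the failure visible to cron/CI wrappers via the exit status.
            std::process::exit(1);
        }
    }
}
```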
diff --git a/storage_scrubber/src/scan_pageserver_metadata.rs b/storage_scrubber/src/scan_pageserver_metadata.rs
index 151ef27672..c1ea589f7f 100644
--- a/storage_scrubber/src/scan_pageserver_metadata.rs
+++ b/storage_scrubber/src/scan_pageserver_metadata.rs
@@ -12,6 +12,7 @@ use pageserver_api::controller_api::MetadataHealthUpdateRequest;
 use pageserver_api::shard::TenantShardId;
 use remote_storage::GenericRemoteStorage;
 use serde::Serialize;
+use tracing::{info_span, Instrument};
 use utils::id::TenantId;
 use utils::shard::ShardCount;
 
@@ -169,45 +170,54 @@ pub async fn scan_pageserver_metadata(
         let mut timeline_ids = HashSet::new();
         let mut timeline_generations = HashMap::new();
         for (ttid, data) in timelines {
-            if ttid.tenant_shard_id.shard_count == highest_shard_count {
-                // Only analyze `TenantShardId`s with highest shard count.
+            async {
+                if ttid.tenant_shard_id.shard_count == highest_shard_count {
+                    // Only analyze `TenantShardId`s with highest shard count.
 
-                // Stash the generation of each timeline, for later use identifying orphan layers
-                if let BlobDataParseResult::Parsed {
-                    index_part,
-                    index_part_generation,
-                    s3_layers: _s3_layers,
-                } = &data.blob_data
-                {
-                    if index_part.deleted_at.is_some() {
-                        // skip deleted timeline.
-                        tracing::info!("Skip analysis of {} b/c timeline is already deleted", ttid);
-                        continue;
+                    // Stash the generation of each timeline, for later use identifying orphan layers
+                    if let BlobDataParseResult::Parsed {
+                        index_part,
+                        index_part_generation,
+                        s3_layers: _s3_layers,
+                    } = &data.blob_data
+                    {
+                        if index_part.deleted_at.is_some() {
+                            // skip deleted timeline.
+                            tracing::info!(
+                                "Skip analysis of {} b/c timeline is already deleted",
+                                ttid
+                            );
+                            return;
+                        }
+                        timeline_generations.insert(ttid, *index_part_generation);
                     }
-                    timeline_generations.insert(ttid, *index_part_generation);
+
+                    // Apply checks to this timeline shard's metadata, and in the process update `tenant_objects`
+                    // reference counts for layers across the tenant.
+                    let analysis = branch_cleanup_and_check_errors(
+                        remote_client,
+                        &ttid,
+                        &mut tenant_objects,
+                        None,
+                        None,
+                        Some(data),
+                    )
+                    .await;
+                    summary.update_analysis(&ttid, &analysis);
+
+                    timeline_ids.insert(ttid.timeline_id);
+                } else {
+                    tracing::info!(
+                        "Skip analysis of {} b/c it has a lower shard count than {}",
+                        ttid,
+                        highest_shard_count.0,
+                    );
                 }
-
-                // Apply checks to this timeline shard's metadata, and in the process update `tenant_objects`
-                // reference counts for layers across the tenant.
-                let analysis = branch_cleanup_and_check_errors(
-                    remote_client,
-                    &ttid,
-                    &mut tenant_objects,
-                    None,
-                    None,
-                    Some(data),
-                )
-                .await;
-                summary.update_analysis(&ttid, &analysis);
-
-                timeline_ids.insert(ttid.timeline_id);
-            } else {
-                tracing::info!(
-                    "Skip analysis of {} b/c a lower shard count than {}",
-                    ttid,
-                    highest_shard_count.0,
-                );
             }
+            .instrument(
+                info_span!("analyze-timeline", shard = %ttid.tenant_shard_id.shard_slug(), timeline = %ttid.timeline_id),
+            )
+            .await
         }
 
         summary.timeline_count += timeline_ids.len();
@@ -278,6 +288,7 @@ pub async fn scan_pageserver_metadata(
                     timelines,
                     highest_shard_count,
                 )
+                .instrument(info_span!("analyze-tenant", tenant = %prev_tenant_id))
                 .await;
                 tenant_id = Some(ttid.tenant_shard_id.tenant_id);
                 highest_shard_count = ttid.tenant_shard_id.shard_count;
@@ -306,15 +317,18 @@ pub async fn scan_pageserver_metadata(
         tenant_timeline_results.push((ttid, data));
     }
 
+    let tenant_id = tenant_id.expect("Must be set if results are present");
+
     if !tenant_timeline_results.is_empty() {
         analyze_tenant(
             &remote_client,
-            tenant_id.expect("Must be set if results are present"),
+            tenant_id,
            &mut summary,
            tenant_objects,
            tenant_timeline_results,
            highest_shard_count,
        )
+        .instrument(info_span!("analyze-tenant", tenant = %tenant_id))
        .await;
    }
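The hunk above wraps each per-timeline body in an `analyze-timeline` span via `tracing`'s `Instrument` extension trait, so every event emitted inside automatically carries the shard and timeline fields. A minimal runnable sketch of the same shape; it assumes `tokio` (with the macros and rt features) and `tracing-subscriber` as extra dependencies:

```rust
use tracing::{info, info_span, Instrument};

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt().init();

    for timeline in ["tl-a", "tl-b"] {
        // Wrap the whole per-item body in a span, so every event inside it
        // carries the `timeline` field, like `analyze-timeline` above.
        async {
            info!("analyzing");
            // ... checks would run here ...
            info!("done");
        }
        .instrument(info_span!("analyze-timeline", timeline = %timeline))
        .await;
    }
}
```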
diff --git a/test_runner/cloud_regress/test_cloud_regress.py b/test_runner/cloud_regress/test_cloud_regress.py
new file mode 100644
index 0000000000..de71357232
--- /dev/null
+++ b/test_runner/cloud_regress/test_cloud_regress.py
@@ -0,0 +1,100 @@
+"""
+Run the regression tests on the cloud instance of Neon
+"""
+
+from pathlib import Path
+from typing import Any
+
+import psycopg2
+import pytest
+from fixtures.log_helper import log
+from fixtures.neon_fixtures import RemotePostgres
+from fixtures.pg_version import PgVersion
+
+
+@pytest.fixture
+def setup(remote_pg: RemotePostgres):
+    """
+    Setup and teardown of the tests
+    """
+    with psycopg2.connect(remote_pg.connstr()) as conn:
+        with conn.cursor() as cur:
+            log.info("Creating the extension")
+            cur.execute("CREATE EXTENSION IF NOT EXISTS regress_so")
+            conn.commit()
+            # TODO: Migrate to branches and remove this code
+            log.info("Looking for subscriptions in the regress database")
+            cur.execute(
+                "SELECT subname FROM pg_catalog.pg_subscription WHERE "
+                "subdbid = (SELECT oid FROM pg_catalog.pg_database WHERE datname='regression');"
+            )
+            if cur.rowcount > 0:
+                with psycopg2.connect(
+                    dbname="regression",
+                    host=remote_pg.default_options["host"],
+                    user=remote_pg.default_options["user"],
+                    password=remote_pg.default_options["password"],
+                ) as regress_conn:
+                    with regress_conn.cursor() as regress_cur:
+                        for sub in cur:
+                            regress_cur.execute(f"ALTER SUBSCRIPTION {sub[0]} DISABLE")
+                            regress_cur.execute(
+                                f"ALTER SUBSCRIPTION {sub[0]} SET (slot_name = NONE)"
+                            )
+                            regress_cur.execute(f"DROP SUBSCRIPTION {sub[0]}")
+                        regress_conn.commit()
+
+    yield
+    # TODO: Migrate to branches and remove this code
+    log.info("Looking for extra roles...")
+    with psycopg2.connect(remote_pg.connstr()) as conn:
+        with conn.cursor() as cur:
+            cur.execute(
+                "SELECT rolname FROM pg_catalog.pg_roles WHERE oid > 16384 AND rolname <> 'neondb_owner'"
+            )
+            roles: list[Any] = []
+            for role in cur:
+                log.info("Role found: %s", role[0])
+                roles.append(role[0])
+            for role in roles:
+                cur.execute(f"DROP ROLE {role}")
+            conn.commit()
+
+
+@pytest.mark.timeout(7200)
+@pytest.mark.remote_cluster
+def test_cloud_regress(
+    setup,
+    remote_pg: RemotePostgres,
+    pg_version: PgVersion,
+    pg_distrib_dir:
Path, + base_dir: Path, + test_output_dir: Path, +): + """ + Run the regression tests + """ + regress_bin = ( + pg_distrib_dir / f"{pg_version.v_prefixed}/lib/postgresql/pgxs/src/test/regress/pg_regress" + ) + test_path = base_dir / f"vendor/postgres-{pg_version.v_prefixed}/src/test/regress" + + env_vars = { + "PGHOST": remote_pg.default_options["host"], + "PGPORT": str( + remote_pg.default_options["port"] if "port" in remote_pg.default_options else 5432 + ), + "PGUSER": remote_pg.default_options["user"], + "PGPASSWORD": remote_pg.default_options["password"], + "PGDATABASE": remote_pg.default_options["dbname"], + } + regress_cmd = [ + str(regress_bin), + f"--inputdir={test_path}", + f"--bindir={pg_distrib_dir}/{pg_version.v_prefixed}/bin", + "--dlpath=/usr/local/lib", + "--max-concurrent-tests=20", + f"--schedule={test_path}/parallel_schedule", + "--max-connections=5", + ] + remote_pg.pg_bin.run(regress_cmd, env=env_vars, cwd=test_output_dir) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index fc83cf3f7c..55c1423ed0 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -849,7 +849,7 @@ class NeonEnvBuilder: for directory_to_clean in reversed(directories_to_clean): if not os.listdir(directory_to_clean): - log.info(f"Removing empty directory {directory_to_clean}") + log.debug(f"Removing empty directory {directory_to_clean}") try: directory_to_clean.rmdir() except Exception as e: diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index 582f9c0264..0dd557c59f 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -631,7 +631,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): log.info( f"requesting timeline archival config {config} for tenant {tenant_id} and timeline {timeline_id}" ) - res = self.post( + res = self.put( f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/archival_config", json=config, ) diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index 80f1c9e4e3..10e8412b19 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -236,7 +236,7 @@ def get_scale_for_db(size_mb: int) -> int: ATTACHMENT_NAME_REGEX: re.Pattern = re.compile( # type: ignore[type-arg] - r"regression\.diffs|.+\.(?:log|stderr|stdout|filediff|metrics|html|walredo)" + r"regression\.(diffs|out)|.+\.(?:log|stderr|stdout|filediff|metrics|html|walredo)" ) diff --git a/test_runner/regress/test_compute_metrics.py b/test_runner/regress/test_compute_metrics.py new file mode 100644 index 0000000000..6138c322d7 --- /dev/null +++ b/test_runner/regress/test_compute_metrics.py @@ -0,0 +1,21 @@ +from fixtures.neon_fixtures import NeonEnv + + +def test_compute_metrics(neon_simple_env: NeonEnv): + """ + Test compute metrics, exposed in the neon_backend_perf_counters and + neon_perf_counters views + """ + env = neon_simple_env + endpoint = env.endpoints.create_start("main") + + conn = endpoint.connect() + cur = conn.cursor() + + # We don't check that the values make sense, this is just a very + # basic check that the server doesn't crash or something like that. + # + # 1.5 is the minimum version to contain these views. 
+ cur.execute("CREATE EXTENSION neon VERSION '1.5'") + cur.execute("SELECT * FROM neon_perf_counters") + cur.execute("SELECT * FROM neon_backend_perf_counters") diff --git a/test_runner/regress/test_neon_extension.py b/test_runner/regress/test_neon_extension.py index bb844244e3..22a6013225 100644 --- a/test_runner/regress/test_neon_extension.py +++ b/test_runner/regress/test_neon_extension.py @@ -50,8 +50,8 @@ def test_neon_extension_compatibility(neon_env_builder: NeonEnvBuilder): # Ensure that the default version is also updated in the neon.control file assert cur.fetchone() == ("1.4",) cur.execute("SELECT * from neon.NEON_STAT_FILE_CACHE") - all_versions = ["1.4", "1.3", "1.2", "1.1", "1.0"] - current_version = "1.4" + all_versions = ["1.5", "1.4", "1.3", "1.2", "1.1", "1.0"] + current_version = "1.5" for idx, begin_version in enumerate(all_versions): for target_version in all_versions[idx + 1 :]: if current_version != begin_version: diff --git a/test_runner/regress/test_twophase.py b/test_runner/regress/test_twophase.py index ebe65e7c29..75fab78d6e 100644 --- a/test_runner/regress/test_twophase.py +++ b/test_runner/regress/test_twophase.py @@ -8,6 +8,7 @@ from fixtures.neon_fixtures import ( PgBin, fork_at_current_lsn, import_timeline_from_vanilla_postgres, + wait_for_wal_insert_lsn, ) @@ -22,11 +23,6 @@ def twophase_test_on_timeline(env: NeonEnv): conn = endpoint.connect() cur = conn.cursor() - # FIXME: Switch to the next WAL segment, to work around the bug fixed in - # https://github.com/neondatabase/neon/pull/8914. When that is merged, this can be - # removed. - cur.execute("select pg_switch_wal()") - cur.execute("CREATE TABLE foo (t text)") # Prepare a transaction that will insert a row @@ -140,3 +136,28 @@ def test_twophase_nonzero_epoch( vanilla_pg.stop() # don't need the original server anymore twophase_test_on_timeline(env) + + +def test_twophase_at_wal_segment_start(neon_simple_env: NeonEnv): + """ + Same as 'test_twophase' test, but the server is started at an LSN at the beginning + of a WAL segment. We had a bug where we didn't initialize the "long XLOG page header" + at the beginning of the segment correctly, which was detected when the checkpointer + tried to read the XLOG_XACT_PREPARE record from the WAL, if that record was on the + very first page of a WAL segment and the server was started up at that first page. + """ + env = neon_simple_env + timeline_id = env.neon_cli.create_branch("test_twophase", "main") + + endpoint = env.endpoints.create_start( + "test_twophase", config_lines=["max_prepared_transactions=5"] + ) + endpoint.safe_psql("SELECT pg_switch_wal()") + + # to avoid hitting https://github.com/neondatabase/neon/issues/9079, wait for the + # WAL to reach the pageserver. 
+ wait_for_wal_insert_lsn(env, endpoint, env.initial_tenant, timeline_id) + + endpoint.stop_and_destroy() + + twophase_test_on_timeline(env) diff --git a/test_runner/regress/test_unlogged.py b/test_runner/regress/test_unlogged.py index deba29536c..4431ccd959 100644 --- a/test_runner/regress/test_unlogged.py +++ b/test_runner/regress/test_unlogged.py @@ -15,8 +15,13 @@ def test_unlogged(neon_simple_env: NeonEnv): cur = conn.cursor() cur.execute("CREATE UNLOGGED TABLE iut (id int);") - # create index to test unlogged index relation as well + # create index to test unlogged index relations as well cur.execute("CREATE UNIQUE INDEX iut_idx ON iut (id);") + cur.execute("CREATE INDEX ON iut USING gist (int4range(id, id, '[]'));") + cur.execute("CREATE INDEX ON iut USING spgist (int4range(id, id, '[]'));") + cur.execute("CREATE INDEX ON iut USING gin ((id::text::jsonb));") + cur.execute("CREATE INDEX ON iut USING brin (id);") + cur.execute("CREATE INDEX ON iut USING hash (id);") cur.execute("ALTER TABLE iut ADD COLUMN seq int GENERATED ALWAYS AS IDENTITY;") cur.execute("INSERT INTO iut (id) values (42);") @@ -39,3 +44,12 @@ def test_unlogged(neon_simple_env: NeonEnv): assert results == [(43, 2)] else: assert results == [(43, 1)] + + # Flush all data and compact it, so we detect any errors related to + # unlogged indexes materialization. + ps_http = env.pageserver.http_client() + ps_http.timeline_compact( + tenant_id=env.initial_tenant, + timeline_id=env.initial_timeline, + force_image_layer_creation=True, + ) diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 87cb68f899..2199b83fb7 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 87cb68f899db434cd6f1908cf0ac8fdeafdd88c1 +Subproject commit 2199b83fb72680001ce0f43bf6187a21dfb8f45d diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index 72b904c0b3..22e580fe9f 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit 72b904c0b3ac43bd74d1e8e6d772e2c476ae25b1 +Subproject commit 22e580fe9ffcea7e02592110b1c9bf426d83cada diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index 3ec6e2496f..e131a9c027 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit 3ec6e2496f64c6fec35c67cb82efd6490a6a4738 +Subproject commit e131a9c027b202ce92bd7b9cf2569d48a6f9948e diff --git a/vendor/postgres-v17 b/vendor/postgres-v17 index 5bbb9bd93d..68b5038f27 160000 --- a/vendor/postgres-v17 +++ b/vendor/postgres-v17 @@ -1 +1 @@ -Subproject commit 5bbb9bd93dd805e90bd8af15d00080363d18ec68 +Subproject commit 68b5038f27e493bde6ae552fe066f10cbdfe6a14 diff --git a/vendor/revisions.json b/vendor/revisions.json index 6289a53670..896a75814e 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,18 +1,18 @@ { "v17": [ - "17rc1", - "5bbb9bd93dd805e90bd8af15d00080363d18ec68" + "17.0", + "68b5038f27e493bde6ae552fe066f10cbdfe6a14" ], "v16": [ "16.4", - "3ec6e2496f64c6fec35c67cb82efd6490a6a4738" + "e131a9c027b202ce92bd7b9cf2569d48a6f9948e" ], "v15": [ "15.8", - "72b904c0b3ac43bd74d1e8e6d772e2c476ae25b1" + "22e580fe9ffcea7e02592110b1c9bf426d83cada" ], "v14": [ "14.13", - "87cb68f899db434cd6f1908cf0ac8fdeafdd88c1" + "2199b83fb72680001ce0f43bf6187a21dfb8f45d" ] } diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml deleted file mode 100644 index c94f95f447..0000000000 --- a/vm-image-spec.yaml +++ /dev/null @@ -1,550 +0,0 @@ -# Supplemental file for neondatabase/autoscaling's vm-builder, for producing the VM compute image. 
---- -commands: - - name: cgconfigparser - user: root - sysvInitAction: sysinit - shell: 'cgconfigparser -l /etc/cgconfig.conf -s 1664' - # restrict permissions on /neonvm/bin/resize-swap, because we grant access to compute_ctl for - # running it as root. - - name: chmod-resize-swap - user: root - sysvInitAction: sysinit - shell: 'chmod 711 /neonvm/bin/resize-swap' - - name: pgbouncer - user: postgres - sysvInitAction: respawn - shell: '/usr/local/bin/pgbouncer /etc/pgbouncer.ini' - - name: postgres-exporter - user: nobody - sysvInitAction: respawn - shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter' - - name: sql-exporter - user: nobody - sysvInitAction: respawn - shell: '/bin/sql_exporter -config.file=/etc/sql_exporter.yml -web.listen-address=:9399' - - name: sql-exporter-autoscaling - user: nobody - sysvInitAction: respawn - shell: '/bin/sql_exporter -config.file=/etc/sql_exporter_autoscaling.yml -web.listen-address=:9499' -shutdownHook: | - su -p postgres --session-command '/usr/local/bin/pg_ctl stop -D /var/db/postgres/compute/pgdata -m fast --wait -t 10' -files: - - filename: compute_ctl-resize-swap - content: | - # Allow postgres user (which is what compute_ctl runs as) to run /neonvm/bin/resize-swap - # as root without requiring entering a password (NOPASSWD), regardless of hostname (ALL) - postgres ALL=(root) NOPASSWD: /neonvm/bin/resize-swap - - filename: pgbouncer.ini - content: | - [databases] - *=host=localhost port=5432 auth_user=cloud_admin - [pgbouncer] - listen_port=6432 - listen_addr=0.0.0.0 - auth_type=scram-sha-256 - auth_user=cloud_admin - auth_dbname=postgres - client_tls_sslmode=disable - server_tls_sslmode=disable - pool_mode=transaction - max_client_conn=10000 - default_pool_size=64 - max_prepared_statements=0 - admin_users=postgres - unix_socket_dir=/tmp/ - unix_socket_mode=0777 - - filename: cgconfig.conf - content: | - # Configuration for cgroups in VM compute nodes - group neon-postgres { - perm { - admin { - uid = postgres; - } - task { - gid = users; - } - } - memory {} - } - - filename: sql_exporter.yml - content: | - # Configuration for sql_exporter - # Global defaults. - global: - # If scrape_timeout <= 0, no timeout is set unless Prometheus provides one. The default is 10s. - scrape_timeout: 10s - # Subtracted from Prometheus' scrape_timeout to give us some headroom and prevent Prometheus from timing out first. - scrape_timeout_offset: 500ms - # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. - min_interval: 0s - # Maximum number of open connections to any one target. Metric queries will run concurrently on multiple connections, - # as will concurrent scrapes. - max_connections: 1 - # Maximum number of idle connections to any one target. Unless you use very long collection intervals, this should - # always be the same as max_connections. - max_idle_connections: 1 - # Maximum number of maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. - # If 0, connections are not closed due to a connection's age. - max_connection_lifetime: 5m - - # The target to monitor and the collectors to execute on it. - target: - # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) - # the schema gets dropped or replaced to match the driver expected DSN format. 
- data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter' - - # Collectors (referenced by name) to execute on the target. - # Glob patterns are supported (see for syntax). - collectors: [neon_collector] - - # Collector files specifies a list of globs. One collector definition is read from each matching file. - # Glob patterns are supported (see for syntax). - collector_files: - - "neon_collector.yml" - - filename: sql_exporter_autoscaling.yml - content: | - # Configuration for sql_exporter for autoscaling-agent - # Global defaults. - global: - # If scrape_timeout <= 0, no timeout is set unless Prometheus provides one. The default is 10s. - scrape_timeout: 10s - # Subtracted from Prometheus' scrape_timeout to give us some headroom and prevent Prometheus from timing out first. - scrape_timeout_offset: 500ms - # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. - min_interval: 0s - # Maximum number of open connections to any one target. Metric queries will run concurrently on multiple connections, - # as will concurrent scrapes. - max_connections: 1 - # Maximum number of idle connections to any one target. Unless you use very long collection intervals, this should - # always be the same as max_connections. - max_idle_connections: 1 - # Maximum number of maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. - # If 0, connections are not closed due to a connection's age. - max_connection_lifetime: 5m - - # The target to monitor and the collectors to execute on it. - target: - # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) - # the schema gets dropped or replaced to match the driver expected DSN format. - data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter_autoscaling' - - # Collectors (referenced by name) to execute on the target. - # Glob patterns are supported (see for syntax). - collectors: [neon_collector_autoscaling] - - # Collector files specifies a list of globs. One collector definition is read from each matching file. - # Glob patterns are supported (see for syntax). 
- collector_files: - - "neon_collector_autoscaling.yml" - - filename: neon_collector.yml - content: | - collector_name: neon_collector - metrics: - - metric_name: lfc_misses - type: gauge - help: 'lfc_misses' - key_labels: - values: [lfc_misses] - query: | - select lfc_value as lfc_misses from neon.neon_lfc_stats where lfc_key='file_cache_misses'; - - - metric_name: lfc_used - type: gauge - help: 'LFC chunks used (chunk = 1MB)' - key_labels: - values: [lfc_used] - query: | - select lfc_value as lfc_used from neon.neon_lfc_stats where lfc_key='file_cache_used'; - - - metric_name: lfc_hits - type: gauge - help: 'lfc_hits' - key_labels: - values: [lfc_hits] - query: | - select lfc_value as lfc_hits from neon.neon_lfc_stats where lfc_key='file_cache_hits'; - - - metric_name: lfc_writes - type: gauge - help: 'lfc_writes' - key_labels: - values: [lfc_writes] - query: | - select lfc_value as lfc_writes from neon.neon_lfc_stats where lfc_key='file_cache_writes'; - - - metric_name: lfc_cache_size_limit - type: gauge - help: 'LFC cache size limit in bytes' - key_labels: - values: [lfc_cache_size_limit] - query: | - select pg_size_bytes(current_setting('neon.file_cache_size_limit')) as lfc_cache_size_limit; - - - metric_name: connection_counts - type: gauge - help: 'Connection counts' - key_labels: - - datname - - state - values: [count] - query: | - select datname, state, count(*) as count from pg_stat_activity where state <> '' group by datname, state; - - - metric_name: pg_stats_userdb - type: gauge - help: 'Stats for several oldest non-system dbs' - key_labels: - - datname - value_label: kind - values: - - db_size - - deadlocks - # Rows - - inserted - - updated - - deleted - # We export stats for 10 non-system database. Without this limit - # it is too easy to abuse the system by creating lots of databases. - query: | - select pg_database_size(datname) as db_size, deadlocks, - tup_inserted as inserted, tup_updated as updated, tup_deleted as deleted, - datname - from pg_stat_database - where datname IN ( - select datname - from pg_database - where datname <> 'postgres' and not datistemplate - order by oid - limit 10 - ); - - - metric_name: max_cluster_size - type: gauge - help: 'neon.max_cluster_size setting' - key_labels: - values: [max_cluster_size] - query: | - select setting::int as max_cluster_size from pg_settings where name = 'neon.max_cluster_size'; - - - metric_name: db_total_size - type: gauge - help: 'Size of all databases' - key_labels: - values: [total] - query: | - select sum(pg_database_size(datname)) as total from pg_database; - - # DEPRECATED - - metric_name: lfc_approximate_working_set_size - type: gauge - help: 'Approximate working set size in pages of 8192 bytes' - key_labels: - values: [approximate_working_set_size] - query: | - select neon.approximate_working_set_size(false) as approximate_working_set_size; - - - metric_name: lfc_approximate_working_set_size_windows - type: gauge - help: 'Approximate working set size in pages of 8192 bytes' - key_labels: [duration] - values: [size] - # NOTE: This is the "public" / "human-readable" version. Here, we supply a small selection - # of durations in a pretty-printed form. 
- query: | - select - x as duration, - neon.approximate_working_set_size_seconds(extract('epoch' from x::interval)::int) as size - from - (values ('5m'),('15m'),('1h')) as t (x); - - - metric_name: compute_current_lsn - type: gauge - help: 'Current LSN of the database' - key_labels: - values: [lsn] - query: | - select - case - when pg_catalog.pg_is_in_recovery() - then (pg_last_wal_replay_lsn() - '0/0')::FLOAT8 - else (pg_current_wal_lsn() - '0/0')::FLOAT8 - end as lsn; - - - metric_name: compute_receive_lsn - type: gauge - help: 'Returns the last write-ahead log location that has been received and synced to disk by streaming replication' - key_labels: - values: [lsn] - query: | - SELECT - CASE - WHEN pg_catalog.pg_is_in_recovery() - THEN (pg_last_wal_receive_lsn() - '0/0')::FLOAT8 - ELSE 0 - END AS lsn; - - - metric_name: replication_delay_bytes - type: gauge - help: 'Bytes between received and replayed LSN' - key_labels: - values: [replication_delay_bytes] - # We use a GREATEST call here because this calculation can be negative. - # The calculation is not atomic, meaning after we've gotten the receive - # LSN, the replay LSN may have advanced past the receive LSN we - # are using for the calculation. - query: | - SELECT GREATEST(0, pg_wal_lsn_diff(pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn())) AS replication_delay_bytes; - - - metric_name: replication_delay_seconds - type: gauge - help: 'Time since last LSN was replayed' - key_labels: - values: [replication_delay_seconds] - query: | - SELECT - CASE - WHEN pg_last_wal_receive_lsn() = pg_last_wal_replay_lsn() THEN 0 - ELSE GREATEST (0, EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp())) - END AS replication_delay_seconds; - - - metric_name: checkpoints_req - type: gauge - help: 'Number of requested checkpoints' - key_labels: - values: [checkpoints_req] - query: | - SELECT checkpoints_req FROM pg_stat_bgwriter; - - - metric_name: checkpoints_timed - type: gauge - help: 'Number of scheduled checkpoints' - key_labels: - values: [checkpoints_timed] - query: | - SELECT checkpoints_timed FROM pg_stat_bgwriter; - - - metric_name: compute_logical_snapshot_files - type: gauge - help: 'Number of snapshot files in pg_logical/snapshot' - key_labels: - - timeline_id - values: [num_logical_snapshot_files] - query: | - SELECT - (SELECT setting FROM pg_settings WHERE name = 'neon.timeline_id') AS timeline_id, - -- Postgres creates temporary snapshot files of the form %X-%X.snap.%d.tmp. These - -- temporary snapshot files are renamed to the actual snapshot files after they are - -- completely built. We only WAL-log the completely built snapshot files. - (SELECT COUNT(*) FROM pg_ls_logicalsnapdir() WHERE name LIKE '%.snap') AS num_logical_snapshot_files; - - # In all the below metrics, we cast LSNs to floats because Prometheus only supports floats. - # It's probably fine because float64 can store integers from -2^53 to +2^53 exactly. - - # Number of slots is limited by max_replication_slots, so collecting position for all of them shouldn't be bad. 
- - metric_name: logical_slot_restart_lsn - type: gauge - help: 'restart_lsn of logical slots' - key_labels: - - slot_name - values: [restart_lsn] - query: | - select slot_name, (restart_lsn - '0/0')::FLOAT8 as restart_lsn - from pg_replication_slots - where slot_type = 'logical'; - - - metric_name: compute_subscriptions_count - type: gauge - help: 'Number of logical replication subscriptions grouped by enabled/disabled' - key_labels: - - enabled - values: [subscriptions_count] - query: | - select subenabled::text as enabled, count(*) as subscriptions_count - from pg_subscription - group by subenabled; - - - metric_name: retained_wal - type: gauge - help: 'Retained WAL in inactive replication slots' - key_labels: - - slot_name - values: [retained_wal] - query: | - SELECT slot_name, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)::FLOAT8 AS retained_wal - FROM pg_replication_slots - WHERE active = false; - - - metric_name: wal_is_lost - type: gauge - help: 'Whether or not the replication slot wal_status is lost' - key_labels: - - slot_name - values: [wal_is_lost] - query: | - SELECT slot_name, - CASE - WHEN wal_status = 'lost' THEN 1 - ELSE 0 - END AS wal_is_lost - FROM pg_replication_slots; - - - filename: neon_collector_autoscaling.yml - content: | - collector_name: neon_collector_autoscaling - metrics: - - metric_name: lfc_misses - type: gauge - help: 'lfc_misses' - key_labels: - values: [lfc_misses] - query: | - select lfc_value as lfc_misses from neon.neon_lfc_stats where lfc_key='file_cache_misses'; - - - metric_name: lfc_used - type: gauge - help: 'LFC chunks used (chunk = 1MB)' - key_labels: - values: [lfc_used] - query: | - select lfc_value as lfc_used from neon.neon_lfc_stats where lfc_key='file_cache_used'; - - - metric_name: lfc_hits - type: gauge - help: 'lfc_hits' - key_labels: - values: [lfc_hits] - query: | - select lfc_value as lfc_hits from neon.neon_lfc_stats where lfc_key='file_cache_hits'; - - - metric_name: lfc_writes - type: gauge - help: 'lfc_writes' - key_labels: - values: [lfc_writes] - query: | - select lfc_value as lfc_writes from neon.neon_lfc_stats where lfc_key='file_cache_writes'; - - - metric_name: lfc_cache_size_limit - type: gauge - help: 'LFC cache size limit in bytes' - key_labels: - values: [lfc_cache_size_limit] - query: | - select pg_size_bytes(current_setting('neon.file_cache_size_limit')) as lfc_cache_size_limit; - - - metric_name: lfc_approximate_working_set_size_windows - type: gauge - help: 'Approximate working set size in pages of 8192 bytes' - key_labels: [duration_seconds] - values: [size] - # NOTE: This is the "internal" / "machine-readable" version. This outputs the working set - # size looking back 1..60 minutes, labeled with the number of minutes. - query: | - select - x::text as duration_seconds, - neon.approximate_working_set_size_seconds(x) as size - from - (select generate_series * 60 as x from generate_series(1, 60)) as t (x); -build: | - # Build cgroup-tools - # - # At time of writing (2023-03-14), debian bullseye has a version of cgroup-tools (technically - # libcgroup) that doesn't support cgroup v2 (version 0.41-11). Unfortunately, the vm-monitor - # requires cgroup v2, so we'll build cgroup-tools ourselves. 
- FROM debian:bullseye-slim as libcgroup-builder - ENV LIBCGROUP_VERSION=v2.0.3 - - RUN set -exu \ - && apt update \ - && apt install --no-install-recommends -y \ - git \ - ca-certificates \ - automake \ - cmake \ - make \ - gcc \ - byacc \ - flex \ - libtool \ - libpam0g-dev \ - && git clone --depth 1 -b $LIBCGROUP_VERSION https://github.com/libcgroup/libcgroup \ - && INSTALL_DIR="/libcgroup-install" \ - && mkdir -p "$INSTALL_DIR/bin" "$INSTALL_DIR/include" \ - && cd libcgroup \ - # extracted from bootstrap.sh, with modified flags: - && (test -d m4 || mkdir m4) \ - && autoreconf -fi \ - && rm -rf autom4te.cache \ - && CFLAGS="-O3" ./configure --prefix="$INSTALL_DIR" --sysconfdir=/etc --localstatedir=/var --enable-opaque-hierarchy="name=systemd" \ - # actually build the thing... - && make install - - FROM quay.io/prometheuscommunity/postgres-exporter:v0.12.1 AS postgres-exporter - - FROM burningalchemist/sql_exporter:0.13 AS sql-exporter - - # Build pgbouncer - # - FROM debian:bullseye-slim AS pgbouncer - RUN set -e \ - && apt-get update \ - && apt-get install -y \ - build-essential \ - git \ - libevent-dev \ - libtool \ - pkg-config - - # Use `dist_man_MANS=` to skip manpage generation (which requires python3/pandoc) - ENV PGBOUNCER_TAG=pgbouncer_1_22_1 - RUN set -e \ - && git clone --recurse-submodules --depth 1 --branch ${PGBOUNCER_TAG} https://github.com/pgbouncer/pgbouncer.git pgbouncer \ - && cd pgbouncer \ - && ./autogen.sh \ - && LDFLAGS=-static ./configure --prefix=/usr/local/pgbouncer --without-openssl \ - && make -j $(nproc) dist_man_MANS= \ - && make install dist_man_MANS= -merge: | - # tweak nofile limits - RUN set -e \ - && echo 'fs.file-max = 1048576' >>/etc/sysctl.conf \ - && test ! -e /etc/security || ( \ - echo '* - nofile 1048576' >>/etc/security/limits.conf \ - && echo 'root - nofile 1048576' >>/etc/security/limits.conf \ - ) - - # Allow postgres user (compute_ctl) to run swap resizer. - # Need to install sudo in order to allow this. - # - # Also, remove the 'read' permission from group/other on /neonvm/bin/resize-swap, just to be safe. 
- RUN set -e \ - && apt update \ - && apt install --no-install-recommends -y \ - sudo \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - COPY compute_ctl-resize-swap /etc/sudoers.d/compute_ctl-resize-swap - - COPY cgconfig.conf /etc/cgconfig.conf - COPY pgbouncer.ini /etc/pgbouncer.ini - COPY sql_exporter.yml /etc/sql_exporter.yml - COPY neon_collector.yml /etc/neon_collector.yml - COPY sql_exporter_autoscaling.yml /etc/sql_exporter_autoscaling.yml - COPY neon_collector_autoscaling.yml /etc/neon_collector_autoscaling.yml - - RUN set -e \ - && chown postgres:postgres /etc/pgbouncer.ini \ - && chmod 0666 /etc/pgbouncer.ini \ - && chmod 0644 /etc/cgconfig.conf \ - && chmod 0644 /etc/sql_exporter.yml \ - && chmod 0644 /etc/neon_collector.yml \ - && chmod 0644 /etc/sql_exporter_autoscaling.yml \ - && chmod 0644 /etc/neon_collector_autoscaling.yml - - COPY --from=libcgroup-builder /libcgroup-install/bin/* /usr/bin/ - COPY --from=libcgroup-builder /libcgroup-install/lib/* /usr/lib/ - COPY --from=libcgroup-builder /libcgroup-install/sbin/* /usr/sbin/ - COPY --from=postgres-exporter /bin/postgres_exporter /bin/postgres_exporter - COPY --from=sql-exporter /bin/sql_exporter /bin/sql_exporter - COPY --from=pgbouncer /usr/local/pgbouncer/bin/pgbouncer /usr/local/bin/pgbouncer diff --git a/workspace_hack/Cargo.toml b/workspace_hack/Cargo.toml index 662916d42c..e6d21e9434 100644 --- a/workspace_hack/Cargo.toml +++ b/workspace_hack/Cargo.toml @@ -45,6 +45,7 @@ futures-io = { version = "0.3" } futures-util = { version = "0.3", features = ["channel", "io", "sink"] } generic-array = { version = "0.14", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2", default-features = false, features = ["std"] } +half = { version = "2", default-features = false, features = ["num-traits"] } hashbrown = { version = "0.14", features = ["raw"] } hex = { version = "0.4", features = ["serde"] } hmac = { version = "0.12", default-features = false, features = ["reset"] } @@ -106,6 +107,7 @@ cc = { version = "1", default-features = false, features = ["parallel"] } chrono = { version = "0.4", default-features = false, features = ["clock", "serde", "wasmbind"] } either = { version = "1" } getrandom = { version = "0.2", default-features = false, features = ["std"] } +half = { version = "2", default-features = false, features = ["num-traits"] } hashbrown = { version = "0.14", features = ["raw"] } indexmap = { version = "1", default-features = false, features = ["std"] } itertools-5ef9efb8ec2df382 = { package = "itertools", version = "0.12", default-features = false, features = ["use_std"] }