Compare commits


4 Commits

Author          SHA1        Message                                                Date
Conrad Ludgate  01475c9e75  fix accidental recursion                               2024-12-06 12:19:40 +00:00
Conrad Ludgate  c835bbba1f  refactor statements and the type cache to avoid arcs   2024-12-06 12:01:19 +00:00
Conrad Ludgate  f94dde4432  delete some more                                       2024-12-06 11:33:34 +00:00
Conrad Ludgate  4991a85704  delete some client methods and make client take &mut   2024-12-06 11:22:03 +00:00
260 changed files with 3034 additions and 11268 deletions

View File

@@ -21,7 +21,3 @@ config-variables:
   - SLACK_UPCOMING_RELEASE_CHANNEL_ID
   - DEV_AWS_OIDC_ROLE_ARN
   - BENCHMARK_INGEST_TARGET_PROJECTID
-  - PGREGRESS_PG16_PROJECT_ID
-  - PGREGRESS_PG17_PROJECT_ID
-  - SLACK_ON_CALL_QA_STAGING_STREAM
-  - DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN
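For context: actionlint checks every ${{ vars.* }} reference in the workflows against this allow-list, so entries have to be dropped here whenever the workflows that used them go away. A minimal sketch of such a config, with the path assumed:

# .github/actionlint.yml (assumed path)
config-variables:
  - DEV_AWS_OIDC_ROLE_ARN
  - BENCHMARK_INGEST_TARGET_PROJECTID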

View File

@@ -7,9 +7,10 @@ inputs:
     type: boolean
     required: false
     default: false
-  aws-oicd-role-arn:
-    description: 'OIDC role arn to interract with S3'
-    required: true
+  aws_oicd_role_arn:
+    description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
+    required: false
+    default: ''
 outputs:
   base-url:
@@ -83,11 +84,12 @@ runs:
         ALLURE_VERSION: 2.27.0
         ALLURE_ZIP_SHA256: b071858fb2fa542c65d8f152c5c40d26267b2dfb74df1f1608a589ecca38e777
-    - uses: aws-actions/configure-aws-credentials@v4
-      if: ${{ !cancelled() }}
+    - name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
+      if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
+      uses: aws-actions/configure-aws-credentials@v4
       with:
         aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+        role-to-assume: ${{ inputs.aws_oicd_role_arn }}
         role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
     # Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this
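The pattern in this hunk recurs throughout the compare: the role input becomes optional (required: false with an empty default) and the credentials step is guarded by an expression, so callers that already hold AWS credentials skip role acquisition entirely. A minimal sketch of the idea in a composite action, with the input name invented for illustration:

# sketch: optional role input plus guarded credential step
inputs:
  role_arn:
    description: 'optional OIDC role to assume; leave empty to use ambient credentials'
    required: false
    default: ''
runs:
  using: "composite"
  steps:
    - name: Configure AWS credentials
      if: ${{ inputs.role_arn != '' }}  # skipped when the caller passes nothing
      uses: aws-actions/configure-aws-credentials@v4
      with:
        aws-region: eu-central-1
        role-to-assume: ${{ inputs.role_arn }}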

View File

@@ -8,9 +8,10 @@ inputs:
   unique-key:
     description: 'string to distinguish different results in the same run'
     required: true
-  aws-oicd-role-arn:
-    description: 'OIDC role arn to interract with S3'
-    required: true
+  aws_oicd_role_arn:
+    description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
+    required: false
+    default: ''
 runs:
   using: "composite"
@@ -35,11 +36,12 @@ runs:
       env:
         REPORT_DIR: ${{ inputs.report-dir }}
-    - uses: aws-actions/configure-aws-credentials@v4
-      if: ${{ !cancelled() }}
+    - name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
+      if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
+      uses: aws-actions/configure-aws-credentials@v4
       with:
         aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+        role-to-assume: ${{ inputs.aws_oicd_role_arn }}
         role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
     - name: Upload test results

View File

@@ -15,19 +15,10 @@ inputs:
   prefix:
     description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
     required: false
-  aws-oicd-role-arn:
-    description: 'OIDC role arn to interract with S3'
-    required: true
 runs:
   using: "composite"
   steps:
-    - uses: aws-actions/configure-aws-credentials@v4
-      with:
-        aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
-        role-duration-seconds: 3600
     - name: Download artifact
       id: download-artifact
       shell: bash -euxo pipefail {0}
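With the per-action credential step removed, this download action relies on ambient credentials: the aws CLI reads AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY from the environment, so exporting them once at the workflow level is enough for every composite step underneath. A rough sketch of how the two pieces connect, reusing names from this diff (the artifact name is hypothetical):

env:
  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}

jobs:
  example:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      # the composite action's `aws s3 cp` calls inherit the env above,
      # so it no longer needs a role input or a configure-aws-credentials step
      - uses: ./.github/actions/download
        with:
          name: some-artifact  # hypothetical artifact name
          path: /tmp/some-artifact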

View File

@@ -48,9 +48,10 @@ inputs:
     description: 'benchmark durations JSON'
     required: false
     default: '{}'
-  aws-oicd-role-arn:
-    description: 'OIDC role arn to interract with S3'
-    required: true
+  aws_oicd_role_arn:
+    description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
+    required: false
+    default: ''
 runs:
   using: "composite"
@@ -61,7 +62,6 @@ runs:
       with:
         name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
         path: /tmp/neon
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
     - name: Download Neon binaries for the previous release
       if: inputs.build_type != 'remote'
@@ -70,7 +70,6 @@ runs:
         name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
         path: /tmp/neon-previous
         prefix: latest
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
     - name: Download compatibility snapshot
       if: inputs.build_type != 'remote'
@@ -82,7 +81,6 @@ runs:
         # The lack of compatibility snapshot (for example, for the new Postgres version)
         # shouldn't fail the whole job. Only relevant test should fail.
         skip-if-does-not-exist: true
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
     - name: Checkout
       if: inputs.needs_postgres_source == 'true'
@@ -220,19 +218,17 @@ runs:
         # The lack of compatibility snapshot shouldn't fail the job
         # (for example if we didn't run the test for non build-and-test workflow)
         skip-if-does-not-exist: true
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
-    - uses: aws-actions/configure-aws-credentials@v4
-      if: ${{ !cancelled() }}
+    - name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
+      if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
+      uses: aws-actions/configure-aws-credentials@v4
       with:
         aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+        role-to-assume: ${{ inputs.aws_oicd_role_arn }}
         role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
     - name: Upload test results
       if: ${{ !cancelled() }}
       uses: ./.github/actions/allure-report-store
       with:
         report-dir: /tmp/test_output/allure/results
         unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

View File

@@ -14,11 +14,9 @@ runs:
         name: coverage-data-artifact
         path: /tmp/coverage
         skip-if-does-not-exist: true # skip if there's no previous coverage to download
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
     - name: Upload coverage data
       uses: ./.github/actions/upload
       with:
         name: coverage-data-artifact
         path: /tmp/coverage
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

View File

@@ -14,10 +14,6 @@ inputs:
   prefix:
     description: "S3 prefix. Default is '${GITHUB_SHA}/${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
     required: false
-  aws-oicd-role-arn:
-    description: "the OIDC role arn for aws auth"
-    required: false
-    default: ""
 runs:
   using: "composite"
@@ -57,13 +53,6 @@ runs:
         echo 'SKIPPED=false' >> $GITHUB_OUTPUT
-    - name: Configure AWS credentials
-      uses: aws-actions/configure-aws-credentials@v4
-      with:
-        aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
-        role-duration-seconds: 3600
     - name: Upload artifact
       if: ${{ steps.prepare-artifact.outputs.SKIPPED == 'false' }}
       shell: bash -euxo pipefail {0}

View File

@@ -70,7 +70,6 @@ jobs:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      # we create a table that has one row for each database that we want to restore with the status whether the restore is done
      - name: Create benchmark_restore_status table if it does not exist

View File

@@ -31,13 +31,12 @@ defaults:
 env:
   RUST_BACKTRACE: 1
   COPT: '-Werror'
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
 jobs:
   build-neon:
     runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      contents: read
     container:
       image: ${{ inputs.build-tools-image }}
       credentials:
@@ -206,13 +205,6 @@ jobs:
            done
          fi
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 18000 # 5 hours
      - name: Run rust tests
        env:
          NEXTEST_RETRIES: 3
@@ -264,7 +256,6 @@ jobs:
        with:
          name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-artifact
          path: /tmp/neon
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      # XXX: keep this after the binaries.list is formed, so the coverage can properly work later
      - name: Merge and upload coverage data
@@ -274,10 +265,6 @@ jobs:
   regress-tests:
     # Don't run regression tests on debug arm64 builds
     if: inputs.build-type != 'debug' || inputs.arch != 'arm64'
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      contents: read
-      statuses: write
     needs: [ build-neon ]
     runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
     container:
@@ -296,7 +283,7 @@ jobs:
          submodules: true
      - name: Pytest regression tests
-        continue-on-error: ${{ matrix.lfc_state == 'with-lfc' && inputs.build-type == 'debug' }}
+        continue-on-error: ${{ matrix.lfc_state == 'with-lfc' }}
        uses: ./.github/actions/run-python-test-set
        timeout-minutes: 60
        with:
@@ -308,7 +295,6 @@ jobs:
          real_s3_region: eu-central-1
          rerun_failed: true
          pg_version: ${{ matrix.pg_version }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
          CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
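One detail in the continue-on-error hunk that is easy to miss: the expression is evaluated per matrix cell, so a condition over matrix values can soften failures for exactly one slice of the matrix while leaving the rest strict. A small self-contained sketch of the mechanism (job name and test entry point invented for illustration):

jobs:
  tests:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        lfc_state: [with-lfc, without-lfc]
    # only the with-lfc cell may fail without failing the workflow
    continue-on-error: ${{ matrix.lfc_state == 'with-lfc' }}
    steps:
      - run: ./run-tests.sh --lfc=${{ matrix.lfc_state }}  # hypothetical test entry point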

View File

@@ -105,7 +105,6 @@ jobs:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Create Neon Project
        id: create-neon-project
@@ -123,7 +122,7 @@ jobs:
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
          # Set --sparse-ordering option of pytest-order plugin
          # to ensure tests are running in order of appears in the file.
          # It's important for test_perf_pgbench.py::test_pgbench_remote_* tests
@@ -153,7 +152,7 @@ jobs:
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate
        with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
@@ -205,7 +204,6 @@ jobs:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Run Logical Replication benchmarks
        uses: ./.github/actions/run-python-test-set
@@ -216,7 +214,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 5400
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -233,7 +231,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 5400
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -245,7 +243,7 @@ jobs:
        uses: ./.github/actions/allure-report-generate
        with:
          store-test-results-into-db: true
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
@@ -407,7 +405,6 @@ jobs:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Create Neon Project
        if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
@@ -455,7 +452,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -470,7 +467,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -485,7 +482,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -503,7 +500,7 @@ jobs:
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate
        with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
@@ -614,7 +611,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -629,7 +626,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -640,7 +637,7 @@ jobs:
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate
        with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
@@ -711,7 +708,6 @@ jobs:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Set up Connection String
        id: set-up-connstr
@@ -743,7 +739,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 43200 -k test_clickbench
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -757,7 +753,7 @@ jobs:
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate
        with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
@@ -822,7 +818,6 @@ jobs:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Get Connstring Secret Name
        run: |
@@ -861,7 +856,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_tpch
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -873,7 +868,7 @@ jobs:
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate
        with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
@@ -931,7 +926,6 @@ jobs:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Set up Connection String
        id: set-up-connstr
@@ -963,7 +957,7 @@ jobs:
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -974,7 +968,7 @@ jobs:
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate
        with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}

View File

@@ -21,6 +21,8 @@ concurrency:
 env:
   RUST_BACKTRACE: 1
   COPT: '-Werror'
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
   # A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
   E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
@@ -253,15 +255,15 @@ jobs:
       build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
       build-tag: ${{ needs.tag.outputs.build-tag }}
       build-type: ${{ matrix.build-type }}
-      # Run tests on all Postgres versions in release builds and only on the latest version in debug builds.
-      # Run without LFC on v17 release and debug builds only. For all the other cases LFC is enabled.
+      # Run tests on all Postgres versions in release builds and only on the latest version in debug builds
+      # run without LFC on v17 release only
       test-cfg: |
-        ${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "with-lfc"},
-                                               {"pg_version":"v15", "lfc_state": "with-lfc"},
-                                               {"pg_version":"v16", "lfc_state": "with-lfc"},
-                                               {"pg_version":"v17", "lfc_state": "with-lfc"},
-                                               {"pg_version":"v17", "lfc_state": "without-lfc"}]'
-            || '[{"pg_version":"v17", "lfc_state": "without-lfc" }]' }}
+        ${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "without-lfc"},
+                                               {"pg_version":"v15", "lfc_state": "without-lfc"},
+                                               {"pg_version":"v16", "lfc_state": "without-lfc"},
+                                               {"pg_version":"v17", "lfc_state": "without-lfc"},
+                                               {"pg_version":"v17", "lfc_state": "with-lfc"}]'
+            || '[{"pg_version":"v17", "lfc_state": "without-lfc"}]' }}
     secrets: inherit
 # Keep `benchmarks` job outside of `build-and-test-locally` workflow to make job failures non-blocking
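The test-cfg value above is a JSON string handed to a reusable workflow; the receiving side typically feeds it through fromJSON to build its test matrix. A sketch of what the consuming end of such an input can look like (the called workflow's file name, input wiring, and job body are assumptions, not taken from this diff):

# in the called workflow, e.g. _build-and-test-locally.yml (assumed)
on:
  workflow_call:
    inputs:
      test-cfg:
        type: string
        required: true
jobs:
  regress-tests:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        # each array element supplies pg_version and lfc_state for one cell
        include: ${{ fromJSON(inputs.test-cfg) }}
    steps:
      - run: echo "testing ${{ matrix.pg_version }} (${{ matrix.lfc_state }})"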
@@ -303,11 +305,6 @@ jobs:
   benchmarks:
     if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
     needs: [ check-permissions, build-and-test-locally, build-build-tools-image, get-benchmarks-durations ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     runs-on: [ self-hosted, small ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
@@ -336,7 +333,6 @@ jobs:
          extra_params: --splits 5 --group ${{ matrix.pytest_split_group }}
          benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
          pg_version: v16
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -349,11 +345,6 @@ jobs:
   report-benchmarks-failures:
     needs: [ benchmarks, create-test-report ]
     if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure'
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     runs-on: ubuntu-22.04
     steps:
@@ -369,11 +360,6 @@ jobs:
   create-test-report:
     needs: [ check-permissions, build-and-test-locally, coverage-report, build-build-tools-image, benchmarks ]
     if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     outputs:
       report-url: ${{ steps.create-allure-report.outputs.report-url }}
@@ -394,7 +380,6 @@ jobs:
        uses: ./.github/actions/allure-report-generate
        with:
          store-test-results-into-db: true
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
@@ -426,10 +411,6 @@ jobs:
   coverage-report:
     if: ${{ !startsWith(github.ref_name, 'release') }}
     needs: [ check-permissions, build-build-tools-image, build-and-test-locally ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
     runs-on: [ self-hosted, small ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
@@ -456,14 +437,12 @@ jobs:
        with:
          name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact
          path: /tmp/neon
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Get coverage artifact
        uses: ./.github/actions/download
        with:
          name: coverage-data-artifact
          path: /tmp/coverage
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Merge coverage data
        run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
@@ -594,10 +573,6 @@ jobs:
   neon-image:
     needs: [ neon-image-arch, tag ]
     runs-on: ubuntu-22.04
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     steps:
      - uses: docker/login-action@v3
@@ -612,15 +587,11 @@ jobs:
            neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-x64 \
            neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-arm64
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}
      - name: Push multi-arch image to ECR
        run: |
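Worth noting for this and the similar hunks below: docker/login-action special-cases *.dkr.ecr.*.amazonaws.com registries, and when given an AWS access key ID and secret as username/password it exchanges them for an ECR login token itself, which is what lets the new side drop the separate configure-aws-credentials plus amazon-ecr-login pair. A minimal sketch of the two variants, using only names that appear in this diff:

# static-credential variant (new side)
- uses: docker/login-action@v3
  with:
    registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
    username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
    password: ${{ secrets.AWS_SECRET_KEY_DEV }}

# OIDC variant (old side): assume a role first, then let the ECR login action mint a token
- uses: aws-actions/configure-aws-credentials@v4
  with:
    aws-region: eu-central-1
    role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- uses: aws-actions/amazon-ecr-login@v2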
@@ -629,10 +600,6 @@ jobs:
   compute-node-image-arch:
     needs: [ check-permissions, build-build-tools-image, tag ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     strategy:
       fail-fast: false
       matrix:
@@ -673,15 +640,11 @@ jobs:
          username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
          password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}
      - uses: docker/login-action@v3
        with:
@@ -754,10 +717,6 @@ jobs:
   compute-node-image:
     needs: [ compute-node-image-arch, tag ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     runs-on: ubuntu-22.04
     strategy:
@@ -802,15 +761,11 @@ jobs:
            neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-x64 \
            neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}
      - name: Push multi-arch compute-node-${{ matrix.version.pg }} image to ECR
        run: |
@@ -840,7 +795,7 @@ jobs:
          - pg: v17
            debian: bookworm
     env:
-      VM_BUILDER_VERSION: v0.37.1
+      VM_BUILDER_VERSION: v0.35.0
     steps:
      - uses: actions/checkout@v4
@@ -935,9 +890,7 @@ jobs:
     runs-on: ubuntu-22.04
     permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
+      id-token: write # for `aws-actions/configure-aws-credentials`
     env:
       VERSIONS: v14 v15 v16 v17
@@ -948,15 +901,12 @@ jobs:
          username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
          password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - name: Login to dev ECR
+        uses: docker/login-action@v3
        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}
      - name: Copy vm-compute-node images to ECR
        run: |
@@ -1035,11 +985,6 @@ jobs:
   trigger-custom-extensions-build-and-wait:
     needs: [ check-permissions, tag ]
     runs-on: ubuntu-22.04
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     steps:
      - name: Set PR's status to pending and request a remote CI test
        run: |
@@ -1115,79 +1060,12 @@ jobs:
     needs: [ check-permissions, promote-images, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
     # `!failure() && !cancelled()` is required because the workflow depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
     if: (github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute') && !failure() && !cancelled()
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
     runs-on: [ self-hosted, small ]
     container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
     steps:
      - uses: actions/checkout@v4
-      - name: Create git tag and GitHub release
-        if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-        uses: actions/github-script@v7
-        with:
-          retries: 5
-          script: |
-            const tag = "${{ needs.tag.outputs.build-tag }}";
-            try {
-              const existingRef = await github.rest.git.getRef({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                ref: `tags/${tag}`,
-              });
-              if (existingRef.data.object.sha !== context.sha) {
-                throw new Error(`Tag ${tag} already exists but points to a different commit (expected: ${context.sha}, actual: ${existingRef.data.object.sha}).`);
-              }
-              console.log(`Tag ${tag} already exists and points to ${context.sha} as expected.`);
-            } catch (error) {
-              if (error.status !== 404) {
-                throw error;
-              }
-              console.log(`Tag ${tag} does not exist. Creating it...`);
-              await github.rest.git.createRef({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                ref: `refs/tags/${tag}`,
-                sha: context.sha,
-              });
-              console.log(`Tag ${tag} created successfully.`);
-            }
-            // TODO: check how GitHub releases looks for proxy/compute releases and enable them if they're ok
-            if (context.ref !== 'refs/heads/release') {
-              console.log(`GitHub release skipped for ${context.ref}.`);
-              return;
-            }
-            try {
-              const existingRelease = await github.rest.repos.getReleaseByTag({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                tag: tag,
-              });
-              console.log(`Release for tag ${tag} already exists (ID: ${existingRelease.data.id}).`);
-            } catch (error) {
-              if (error.status !== 404) {
-                throw error;
-              }
-              console.log(`Release for tag ${tag} does not exist. Creating it...`);
-              await github.rest.repos.createRelease({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                tag_name: tag,
-                generate_release_notes: true,
-              });
-              console.log(`Release for tag ${tag} created successfully.`);
-            }
      - name: Trigger deploy workflow
        env:
          GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
@@ -1237,13 +1115,38 @@ jobs:
            exit 1
          fi
+      - name: Create git tag
+        if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
+        uses: actions/github-script@v7
+        with:
+          # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
+          retries: 5
+          script: |
+            await github.rest.git.createRef({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              ref: "refs/tags/${{ needs.tag.outputs.build-tag }}",
+              sha: context.sha,
+            })
+      # TODO: check how GitHub releases looks for proxy releases and enable it if it's ok
+      - name: Create GitHub release
+        if: github.ref_name == 'release'
+        uses: actions/github-script@v7
+        with:
+          # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
+          retries: 5
+          script: |
+            await github.rest.repos.createRelease({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              tag_name: "${{ needs.tag.outputs.build-tag }}",
+              generate_release_notes: true,
+            })
   # The job runs on `release` branch and copies compatibility data and Neon artifact from the last *release PR* to the latest directory
   promote-compatibility-data:
     needs: [ deploy ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     # `!failure() && !cancelled()` is required because the workflow transitively depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
     if: github.ref_name == 'release' && !failure() && !cancelled()
@@ -1280,12 +1183,6 @@ jobs:
          echo "run-id=${run_id}" | tee -a ${GITHUB_OUTPUT}
          echo "commit-sha=${last_commit_sha}" | tee -a ${GITHUB_OUTPUT}
-      - uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
      - name: Promote compatibility snapshot and Neon artifact
        env:
          BUCKET: neon-github-public-dev

View File

@@ -19,21 +19,15 @@ concurrency:
   group: ${{ github.workflow }}
   cancel-in-progress: true
-permissions:
-  id-token: write # aws-actions/configure-aws-credentials
-  statuses: write
-  contents: write
 jobs:
   regress:
     env:
       POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+      DEFAULT_PG_VERSION: 16
       TEST_OUTPUT: /tmp/test_output
       BUILD_TYPE: remote
-    strategy:
-      fail-fast: false
-      matrix:
-        pg-version: [16, 17]
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
     runs-on: us-east-2
     container:
@@ -46,11 +40,9 @@ jobs:
          submodules: true
      - name: Patch the test
-        env:
-          PG_VERSION: ${{matrix.pg-version}}
        run: |
-          cd "vendor/postgres-v${PG_VERSION}"
-          patch -p1 < "../../compute/patches/cloud_regress_pg${PG_VERSION}.patch"
+          cd "vendor/postgres-v${DEFAULT_PG_VERSION}"
+          patch -p1 < "../../compute/patches/cloud_regress_pg${DEFAULT_PG_VERSION}.patch"
      - name: Generate a random password
        id: pwgen
@@ -63,9 +55,8 @@ jobs:
      - name: Change tests according to the generated password
        env:
          DBPASS: ${{ steps.pwgen.outputs.DBPASS }}
-          PG_VERSION: ${{matrix.pg-version}}
        run: |
-          cd vendor/postgres-v"${PG_VERSION}"/src/test/regress
+          cd vendor/postgres-v"${DEFAULT_PG_VERSION}"/src/test/regress
          for fname in sql/*.sql expected/*.out; do
            sed -i.bak s/NEON_PASSWORD_PLACEHOLDER/"'${DBPASS}'"/ "${fname}"
          done
@@ -81,46 +72,27 @@ jobs:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-      - name: Create a new branch
-        id: create-branch
-        uses: ./.github/actions/neon-branch-create
-        with:
-          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
-          project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
      - name: Run the regression tests
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: cloud_regress
-          pg_version: ${{matrix.pg-version}}
+          pg_version: ${{ env.DEFAULT_PG_VERSION }}
          extra_params: -m remote_cluster
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
-          BENCHMARK_CONNSTR: ${{steps.create-branch.outputs.dsn}}
+          BENCHMARK_CONNSTR: ${{ secrets.PG_REGRESS_CONNSTR }}
-      - name: Delete branch
-        if: always()
-        uses: ./.github/actions/neon-branch-delete
-        with:
-          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
-          project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
-          branch_id: ${{steps.create-branch.outputs.branch_id}}
      - name: Create Allure report
        id: create-allure-report
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate
-        with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
-          channel-id: ${{ vars.SLACK_ON_CALL_QA_STAGING_STREAM }}
+          channel-id: "C033QLM5P7D" # on-call-staging-stream
          slack-message: |
            Periodic pg_regress on staging: ${{ job.status }}
            <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>

View File

@@ -13,7 +13,7 @@ on:
     # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
     - cron: '0 9 * * *' # run once a day, timezone is utc
   workflow_dispatch: # adds ability to run this manually
 defaults:
   run:
     shell: bash -euxo pipefail {0}
@@ -28,7 +28,7 @@ jobs:
     strategy:
       fail-fast: false # allow other variants to continue even if one fails
       matrix:
         target_project: [new_empty_project, large_existing_project]
     permissions:
       contents: write
       statuses: write
@@ -56,7 +56,7 @@ jobs:
        with:
          aws-region: eu-central-1
          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
          role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role
      - name: Download Neon artifact
        uses: ./.github/actions/download
@@ -64,7 +64,6 @@ jobs:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      - name: Create Neon Project
        if: ${{ matrix.target_project == 'new_empty_project' }}
@@ -95,7 +94,7 @@ jobs:
          project_id: ${{ vars.BENCHMARK_INGEST_TARGET_PROJECTID }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
      - name: Initialize Neon project
        if: ${{ matrix.target_project == 'large_existing_project' }}
        env:
          BENCHMARK_INGEST_TARGET_CONNSTR: ${{ steps.create-neon-branch-ingest-target.outputs.dsn }}
@@ -123,7 +122,7 @@ jobs:
          ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"
          echo "BENCHMARK_INGEST_TARGET_CONNSTR=${BENCHMARK_INGEST_TARGET_CONNSTR}" >> $GITHUB_ENV
      - name: Invoke pgcopydb
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: remote
@@ -132,7 +131,7 @@ jobs:
          extra_params: -s -m remote_cluster --timeout 86400 -k test_ingest_performance_using_pgcopydb
          pg_version: v16
          save_perf_report: true
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          BENCHMARK_INGEST_SOURCE_CONNSTR: ${{ secrets.BENCHMARK_INGEST_SOURCE_CONNSTR }}
          TARGET_PROJECT_TYPE: ${{ matrix.target_project }}
@@ -144,7 +143,7 @@ jobs:
        run: |
          export LD_LIBRARY_PATH=${PG_16_LIB_PATH}
          ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "\dt+"
      - name: Delete Neon Project
        if: ${{ always() && matrix.target_project == 'new_empty_project' }}
        uses: ./.github/actions/neon-project-delete

View File

@@ -143,10 +143,6 @@ jobs:
   gather-rust-build-stats:
     needs: [ check-permissions, build-build-tools-image ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
     if: |
       contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
       contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
@@ -181,18 +177,13 @@ jobs:
      - name: Produce the build stats
        run: PQ_LIB_DIR=$(pwd)/pg_install/v17/lib cargo build --all --release --timings -j$(nproc)
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
      - name: Upload the build stats
        id: upload-stats
        env:
          BUCKET: neon-github-public-dev
          SHA: ${{ github.event.pull_request.head.sha || github.sha }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
        run: |
          REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
          aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"

View File

@@ -27,11 +27,6 @@ concurrency:
jobs: jobs:
trigger_bench_on_ec2_machine_in_eu_central_1: trigger_bench_on_ec2_machine_in_eu_central_1:
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
pull-requests: write
runs-on: [ self-hosted, small ] runs-on: [ self-hosted, small ]
container: container:
image: neondatabase/build-tools:pinned-bookworm image: neondatabase/build-tools:pinned-bookworm
@@ -43,6 +38,8 @@ jobs:
env: env:
API_KEY: ${{ secrets.PERIODIC_PAGEBENCH_EC2_RUNNER_API_KEY }} API_KEY: ${{ secrets.PERIODIC_PAGEBENCH_EC2_RUNNER_API_KEY }}
RUN_ID: ${{ github.run_id }} RUN_ID: ${{ github.run_id }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY : ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_SECRET }}
AWS_DEFAULT_REGION : "eu-central-1" AWS_DEFAULT_REGION : "eu-central-1"
AWS_INSTANCE_ID : "i-02a59a3bf86bc7e74" AWS_INSTANCE_ID : "i-02a59a3bf86bc7e74"
steps: steps:
@@ -53,13 +50,6 @@ jobs:
- name: Show my own (github runner) external IP address - usefull for IP allowlisting - name: Show my own (github runner) external IP address - usefull for IP allowlisting
run: curl https://ifconfig.me run: curl https://ifconfig.me
- name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
role-duration-seconds: 3600
- name: Start EC2 instance and wait for the instance to boot up
run: |
aws ec2 start-instances --instance-ids $AWS_INSTANCE_ID
@@ -134,10 +124,11 @@
cat "test_log_${GITHUB_RUN_ID}"
- name: Create Allure report
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-generate
with:
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
@@ -157,14 +148,6 @@
-H "Authorization: Bearer $API_KEY" \
-d ''
- name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
if: always() && steps.poll_step.outputs.too_many_runs != 'true'
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
role-duration-seconds: 3600
- name: Stop EC2 instance and wait for the instance to be stopped
if: always() && steps.poll_step.outputs.too_many_runs != 'true'
run: |


@@ -25,13 +25,11 @@ defaults:
run:
shell: bash -euxo pipefail {0}
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write # require for posting a status update
env:
DEFAULT_PG_VERSION: 16
PLATFORM: neon-captest-new
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
AWS_DEFAULT_REGION: eu-central-1
jobs:
@@ -96,7 +94,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Create Neon Project
id: create-neon-project
@@ -113,7 +110,6 @@
run_in_parallel: false
extra_params: -m remote_cluster
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
@@ -130,7 +126,6 @@
uses: ./.github/actions/allure-report-generate
with:
store-test-results-into-db: true
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
@@ -164,7 +159,6 @@
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Create Neon Project
id: create-neon-project
@@ -181,7 +175,6 @@
run_in_parallel: false
extra_params: -m remote_cluster
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
@@ -198,7 +191,6 @@
uses: ./.github/actions/allure-report-generate
with:
store-test-results-into-db: true
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}


@@ -67,7 +67,7 @@ jobs:
runs-on: ubuntu-22.04
permissions:
id-token: write # for `azure/login` and aws auth
id-token: write # for `azure/login`
steps:
- uses: docker/login-action@v3
@@ -75,15 +75,11 @@
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 3600
- name: Login to Amazon Dev ECR
uses: aws-actions/amazon-ecr-login@v2
- uses: docker/login-action@v3
with:
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
- name: Azure login
uses: azure/login@6c251865b4e6290e7b78be643ea2d005bc51f69a # @v2.1.1


@@ -63,7 +63,6 @@ jobs:
if: always()
permissions:
statuses: write # for `github.repos.createCommitStatus(...)`
contents: write
needs:
- get-changed-files
- check-codestyle-python


@@ -3,7 +3,7 @@ name: Create Release Branch
on:
schedule:
# It should be kept in sync with if-condition in jobs
- cron: '0 6 * * FRI' # Storage release
- cron: '0 6 * * MON' # Storage release
- cron: '0 6 * * THU' # Proxy release
workflow_dispatch:
inputs:
@@ -29,7 +29,7 @@ defaults:
jobs:
create-storage-release-branch:
if: ${{ github.event.schedule == '0 6 * * FRI' || inputs.create-storage-release-branch }}
if: ${{ github.event.schedule == '0 6 * * MON' || inputs.create-storage-release-branch }}
permissions:
contents: write


@@ -1,29 +1,16 @@
# Autoscaling
/libs/vm_monitor/ @neondatabase/autoscaling
# DevProd
/.github/ @neondatabase/developer-productivity
# Compute
/pgxn/ @neondatabase/compute
/vendor/ @neondatabase/compute
/compute/ @neondatabase/compute
/compute_tools/ @neondatabase/compute
# Proxy
/libs/proxy/ @neondatabase/proxy
/proxy/ @neondatabase/proxy
# Storage
/pageserver/ @neondatabase/storage
/safekeeper/ @neondatabase/storage
/storage_controller @neondatabase/storage
/storage_scrubber @neondatabase/storage
/libs/pageserver_api/ @neondatabase/storage
/libs/remote_storage/ @neondatabase/storage
/libs/safekeeper_api/ @neondatabase/storage
# Shared
/pgxn/neon/ @neondatabase/compute @neondatabase/storage
/libs/compute_api/ @neondatabase/compute @neondatabase/control-plane
/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
/.github/ @neondatabase/developer-productivity
/compute_tools/ @neondatabase/control-plane @neondatabase/compute
/libs/pageserver_api/ @neondatabase/storage
/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
/libs/proxy/ @neondatabase/proxy
/libs/remote_storage/ @neondatabase/storage
/libs/safekeeper_api/ @neondatabase/storage
/libs/vm_monitor/ @neondatabase/autoscaling
/pageserver/ @neondatabase/storage
/pgxn/ @neondatabase/compute
/pgxn/neon/ @neondatabase/compute @neondatabase/storage
/proxy/ @neondatabase/proxy
/safekeeper/ @neondatabase/storage
/storage_controller @neondatabase/storage
/storage_scrubber @neondatabase/storage
/vendor/ @neondatabase/compute

Cargo.lock (generated, 639 changed lines): diff suppressed because it is too large.

@@ -11,7 +11,6 @@ members = [
"pageserver/pagebench", "pageserver/pagebench",
"proxy", "proxy",
"safekeeper", "safekeeper",
"safekeeper/client",
"storage_broker", "storage_broker",
"storage_controller", "storage_controller",
"storage_controller/client", "storage_controller/client",
@@ -52,7 +51,10 @@ anyhow = { version = "1.0", features = ["backtrace"] }
arc-swap = "1.6" arc-swap = "1.6"
async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] } async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] }
atomic-take = "1.1.0" atomic-take = "1.1.0"
backtrace = "0.3.74" azure_core = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
azure_identity = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
azure_storage = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
azure_storage_blobs = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
flate2 = "1.0.26" flate2 = "1.0.26"
async-stream = "0.3" async-stream = "0.3"
async-trait = "0.1" async-trait = "0.1"
@@ -214,12 +216,6 @@ postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git",
postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" } postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" } tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
## Azure SDK crates
azure_core = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
azure_identity = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
azure_storage = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
azure_storage_blobs = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
## Local libraries ## Local libraries
compute_api = { version = "0.1", path = "./libs/compute_api/" } compute_api = { version = "0.1", path = "./libs/compute_api/" }
consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" } consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
@@ -235,7 +231,6 @@ postgres_initdb = { path = "./libs/postgres_initdb" }
pq_proto = { version = "0.1", path = "./libs/pq_proto/" } pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
remote_storage = { version = "0.1", path = "./libs/remote_storage/" } remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" } safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
safekeeper_client = { path = "./safekeeper/client" }
desim = { version = "0.1", path = "./libs/desim" } desim = { version = "0.1", path = "./libs/desim" }
storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy. storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
storage_controller_client = { path = "./storage_controller/client" } storage_controller_client = { path = "./storage_controller/client" }


@@ -115,7 +115,7 @@ RUN set -e \
# Keep the version the same as in compute/compute-node.Dockerfile and
# test_runner/regress/test_compute_metrics.py.
ENV SQL_EXPORTER_VERSION=0.16.0
ENV SQL_EXPORTER_VERSION=0.13.1
RUN curl -fsSL \
"https://github.com/burningalchemist/sql_exporter/releases/download/${SQL_EXPORTER_VERSION}/sql_exporter-${SQL_EXPORTER_VERSION}.linux-$(case "$(uname -m)" in x86_64) echo amd64;; aarch64) echo arm64;; esac).tar.gz" \
--output sql_exporter.tar.gz \


@@ -35,12 +35,10 @@ RUN case $DEBIAN_VERSION in \
;; \
esac && \
apt update && \
apt install --no-install-recommends --no-install-suggests -y \
ninja-build git autoconf automake libtool build-essential bison flex libreadline-dev \
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget ca-certificates pkg-config libssl-dev \
libicu-dev libxslt1-dev liblz4-dev libzstd-dev zstd \
$VERSION_INSTALLS \
&& apt clean && rm -rf /var/lib/apt/lists/*
apt install --no-install-recommends -y git autoconf automake libtool build-essential bison flex libreadline-dev \
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget ca-certificates pkg-config libssl-dev \
libicu-dev libxslt1-dev liblz4-dev libzstd-dev zstd \
$VERSION_INSTALLS
#########################################################################################
#
@@ -115,12 +113,10 @@ ARG DEBIAN_VERSION
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y \
gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
protobuf-c-compiler xsltproc \
&& apt clean && rm -rf /var/lib/apt/lists/*
apt install --no-install-recommends -y gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
protobuf-c-compiler xsltproc
# Postgis 3.5.0 requires SFCGAL 1.4+
@@ -147,9 +143,9 @@ RUN case "${DEBIAN_VERSION}" in \
wget https://gitlab.com/sfcgal/SFCGAL/-/archive/v${SFCGAL_VERSION}/SFCGAL-v${SFCGAL_VERSION}.tar.gz -O SFCGAL.tar.gz && \
echo "${SFCGAL_CHECKSUM} SFCGAL.tar.gz" | sha256sum --check && \
mkdir sfcgal-src && cd sfcgal-src && tar xzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
cmake -DCMAKE_BUILD_TYPE=Release -GNinja . && ninja -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/sfcgal ninja install -j $(getconf _NPROCESSORS_ONLN) && \
ninja clean && cp -R /sfcgal/* /
cmake -DCMAKE_BUILD_TYPE=Release . && make -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
make clean && cp -R /sfcgal/* /
ENV PATH="/usr/local/pgsql/bin:$PATH"
@@ -217,9 +213,9 @@ RUN case "${PG_VERSION}" in \
echo "${PGROUTING_CHECKSUM} pgrouting.tar.gz" | sha256sum --check && \ echo "${PGROUTING_CHECKSUM} pgrouting.tar.gz" | sha256sum --check && \
mkdir pgrouting-src && cd pgrouting-src && tar xzf ../pgrouting.tar.gz --strip-components=1 -C . && \ mkdir pgrouting-src && cd pgrouting-src && tar xzf ../pgrouting.tar.gz --strip-components=1 -C . && \
mkdir build && cd build && \ mkdir build && cd build && \
cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. && \ cmake -DCMAKE_BUILD_TYPE=Release .. && \
ninja -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) && \
ninja -j $(getconf _NPROCESSORS_ONLN) install && \ make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control && \
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\ find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\
cp /usr/local/pgsql/share/extension/pgrouting.control /extensions/postgis && \ cp /usr/local/pgsql/share/extension/pgrouting.control /extensions/postgis && \
@@ -239,9 +235,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY compute/patches/plv8-3.1.10.patch /plv8-3.1.10.patch
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y \
ninja-build python3-dev libncurses5 binutils clang \
&& apt clean && rm -rf /var/lib/apt/lists/*
apt install --no-install-recommends -y ninja-build python3-dev libncurses5 binutils clang
# plv8 3.2.3 supports v17
# last release v3.2.3 - Sep 7, 2024
@@ -307,10 +301,9 @@ RUN mkdir -p /h3/usr/ && \
echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \ echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \
mkdir h3-src && cd h3-src && tar xzf ../h3.tar.gz --strip-components=1 -C . && \ mkdir h3-src && cd h3-src && tar xzf ../h3.tar.gz --strip-components=1 -C . && \
mkdir build && cd build && \ mkdir build && cd build && \
cmake .. -GNinja -DBUILD_BENCHMARKS=0 -DCMAKE_BUILD_TYPE=Release \ cmake .. -DCMAKE_BUILD_TYPE=Release && \
-DBUILD_FUZZERS=0 -DBUILD_FILTERS=0 -DBUILD_GENERATORS=0 -DBUILD_TESTING=0 \ make -j $(getconf _NPROCESSORS_ONLN) && \
&& ninja -j $(getconf _NPROCESSORS_ONLN) && \ DESTDIR=/h3 make install && \
DESTDIR=/h3 ninja install && \
cp -R /h3/usr / && \ cp -R /h3/usr / && \
rm -rf build rm -rf build
@@ -657,15 +650,14 @@ FROM build-deps AS rdkit-pg-build
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y \
RUN apt-get update && \
apt-get install --no-install-recommends -y \
libboost-iostreams1.74-dev \
libboost-regex1.74-dev \
libboost-serialization1.74-dev \
libboost-system1.74-dev \
libeigen3-dev \
libboost-all-dev \
&& apt clean && rm -rf /var/lib/apt/lists/*
libboost-all-dev
# rdkit Release_2024_09_1 supports v17
# last release Release_2024_09_1 - Sep 27, 2024
@@ -701,8 +693,6 @@ RUN case "${PG_VERSION}" in \
-D RDK_BUILD_MOLINTERCHANGE_SUPPORT=OFF \
-D RDK_BUILD_YAEHMOP_SUPPORT=OFF \
-D RDK_BUILD_STRUCTCHECKER_SUPPORT=OFF \
-D RDK_TEST_MULTITHREADED=OFF \
-D RDK_BUILD_CPP_TESTS=OFF \
-D RDK_USE_URF=OFF \
-D RDK_BUILD_PGSQL=ON \
-D RDK_PGSQL_STATIC=ON \
@@ -714,10 +704,9 @@ RUN case "${PG_VERSION}" in \
-D RDK_INSTALL_COMIC_FONTS=OFF \
-D RDK_BUILD_FREETYPE_SUPPORT=OFF \
-D CMAKE_BUILD_TYPE=Release \
-GNinja \
. && \
ninja -j $(getconf _NPROCESSORS_ONLN) && \
ninja -j $(getconf _NPROCESSORS_ONLN) install && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/rdkit.control
#########################################################################################
@@ -860,9 +849,8 @@ FROM build-deps AS rust-extensions-build
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
apt clean && rm -rf /var/lib/apt/lists/* && \
RUN apt-get update && \
apt-get install --no-install-recommends -y curl libclang-dev && \
useradd -ms /bin/bash nonroot -b /home
ENV HOME=/home/nonroot
@@ -897,9 +885,8 @@ FROM build-deps AS rust-extensions-build-pgrx12
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
apt clean && rm -rf /var/lib/apt/lists/* && \
RUN apt-get update && \
apt-get install --no-install-recommends -y curl libclang-dev && \
useradd -ms /bin/bash nonroot -b /home
ENV HOME=/home/nonroot
@@ -927,22 +914,18 @@ FROM rust-extensions-build-pgrx12 AS pg-onnx-build
# cmake 3.26 or higher is required, so installing it using pip (bullseye-backports has cmake 3.25).
# Install it using virtual environment, because Python 3.11 (the default version on Debian 12 (Bookworm)) complains otherwise
RUN apt update && apt install --no-install-recommends --no-install-suggests -y \
python3 python3-pip python3-venv && \
apt clean && rm -rf /var/lib/apt/lists/* && \
RUN apt-get update && apt-get install -y python3 python3-pip python3-venv && \
python3 -m venv venv && \
. venv/bin/activate && \
python3 -m pip install cmake==3.30.5 && \
wget https://github.com/microsoft/onnxruntime/archive/refs/tags/v1.18.1.tar.gz -O onnxruntime.tar.gz && \
mkdir onnxruntime-src && cd onnxruntime-src && tar xzf ../onnxruntime.tar.gz --strip-components=1 -C . && \
./build.sh --config Release --parallel --cmake_generator Ninja \
--skip_submodule_sync --skip_tests --allow_running_as_root
./build.sh --config Release --parallel --skip_submodule_sync --skip_tests --allow_running_as_root
FROM pg-onnx-build AS pgrag-pg-build
RUN apt update && apt install --no-install-recommends --no-install-suggests -y protobuf-compiler \
&& apt clean && rm -rf /var/lib/apt/lists/* && \
RUN apt-get install -y protobuf-compiler && \
wget https://github.com/neondatabase-labs/pgrag/archive/refs/tags/v0.0.0.tar.gz -O pgrag.tar.gz && \
echo "2cbe394c1e74fc8bcad9b52d5fbbfb783aef834ca3ce44626cfd770573700bb4 pgrag.tar.gz" | sha256sum --check && \
mkdir pgrag-src && cd pgrag-src && tar xzf ../pgrag.tar.gz --strip-components=1 -C . && \
@@ -1185,25 +1168,6 @@ RUN case "${PG_VERSION}" in \
make BUILD_TYPE=release -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_mooncake.control
#########################################################################################
#
# Layer "pg_repack"
# compile pg_repack extension
#
#########################################################################################
FROM build-deps AS pg-repack-build
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
ENV PATH="/usr/local/pgsql/bin/:$PATH"
RUN wget https://github.com/reorg/pg_repack/archive/refs/tags/ver_1.5.2.tar.gz -O pg_repack.tar.gz && \
echo '4516cad42251ed3ad53ff619733004db47d5755acac83f75924cd94d1c4fb681 pg_repack.tar.gz' | sha256sum --check && \
mkdir pg_repack-src && cd pg_repack-src && tar xzf ../pg_repack.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install
#########################################################################################
#
# Layer "neon-pg-ext-build"
@@ -1249,7 +1213,6 @@ COPY --from=pg-anon-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-ivm-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-partman-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-mooncake-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-repack-build /usr/local/pgsql/ /usr/local/pgsql/
COPY pgxn/ pgxn/
RUN make -j $(getconf _NPROCESSORS_ONLN) \
@@ -1316,8 +1279,8 @@ COPY --from=compute-tools /home/nonroot/target/release-line-debug-size-lto/fast_
FROM debian:$DEBIAN_FLAVOR AS pgbouncer
RUN set -e \
&& apt update \
&& apt install --no-install-suggests --no-install-recommends -y \
&& apt-get update \
&& apt-get install --no-install-recommends -y \
build-essential \
git \
ca-certificates \
@@ -1325,8 +1288,7 @@ RUN set -e \
automake \
libevent-dev \
libtool \
pkg-config \
&& apt clean && rm -rf /var/lib/apt/lists/*
pkg-config
# Use `dist_man_MANS=` to skip manpage generation (which requires python3/pandoc)
ENV PGBOUNCER_TAG=pgbouncer_1_22_1
@@ -1362,7 +1324,7 @@ FROM quay.io/prometheuscommunity/postgres-exporter:v0.12.1 AS postgres-exporter
# Keep the version the same as in build-tools.Dockerfile and
# test_runner/regress/test_compute_metrics.py.
FROM burningalchemist/sql_exporter:0.16.0 AS sql-exporter
FROM burningalchemist/sql_exporter:0.13.1 AS sql-exporter
#########################################################################################
#
@@ -1557,7 +1519,7 @@ RUN apt update && \
procps \
ca-certificates \
$VERSION_INSTALLS && \
apt clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
# s5cmd 2.2.2 from https://github.com/peak/s5cmd/releases/tag/v2.2.2


@@ -3,7 +3,7 @@
metrics: [
import 'sql_exporter/checkpoints_req.libsonnet',
import 'sql_exporter/checkpoints_timed.libsonnet',
import 'sql_exporter/compute_backpressure_throttling_seconds_total.libsonnet',
import 'sql_exporter/compute_backpressure_throttling_seconds.libsonnet',
import 'sql_exporter/compute_current_lsn.libsonnet',
import 'sql_exporter/compute_logical_snapshot_files.libsonnet',
import 'sql_exporter/compute_logical_snapshots_bytes.libsonnet',


@@ -19,10 +19,3 @@ max_prepared_statements=0
admin_users=postgres
unix_socket_dir=/tmp/
unix_socket_mode=0777
;; Disable connection logging. It produces a lot of logs that no one looks at,
;; and we can get similar log entries from the proxy too. We had incidents in
;; the past where the logging significantly stressed the log device or pgbouncer
;; itself.
log_connections=0
log_disconnections=0


@@ -1,10 +1,10 @@
{
metric_name: 'compute_backpressure_throttling_seconds_total',
metric_name: 'compute_backpressure_throttling_seconds',
type: 'counter',
type: 'gauge',
help: 'Time compute has spent throttled',
key_labels: null,
values: [
'throttled',
],
query: importstr 'sql_exporter/compute_backpressure_throttling_seconds_total.sql',
query: importstr 'sql_exporter/compute_backpressure_throttling_seconds.sql',
}
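Aside (not part of the diff): the rename above follows the Prometheus convention that counters carry a _total suffix and are consumed through rate(), while a gauge is read directly. A hedged sketch using only the metric names shown above:
# PromQL, illustrative only
rate(compute_backpressure_throttling_seconds_total[5m])   # counter variant
compute_backpressure_throttling_seconds                   # gauge variant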


@@ -981,7 +981,7 @@ index fc42d418bf..e38f517574 100644
CREATE SCHEMA addr_nsp;
SET search_path TO 'addr_nsp';
diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out
index 8475231735..0653946337 100644
index 8475231735..1afae5395f 100644
--- a/src/test/regress/expected/password.out
+++ b/src/test/regress/expected/password.out
@@ -12,11 +12,11 @@ SET password_encryption = 'md5'; -- ok
@@ -1006,63 +1006,65 @@ index 8475231735..0653946337 100644
-----------------+---------------------------------------------------
- regress_passwd1 | md5783277baca28003b33453252be4dbb34
- regress_passwd2 | md54044304ba511dd062133eb5b4b84a2a3
+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
+ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
+ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd4 |
+ regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
(4 rows)
-- Rename a role
@@ -54,24 +54,16 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
@@ -54,24 +54,30 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
-- passwords.
SET password_encryption = 'md5';
-- encrypt with MD5
-ALTER ROLE regress_passwd2 PASSWORD 'foo';
--- already encrypted, use as they are
-ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
-ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
+ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- already encrypted, use as they are
ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
SET password_encryption = 'scram-sha-256';
-- create SCRAM secret
-ALTER ROLE regress_passwd4 PASSWORD 'foo';
--- already encrypted with MD5, use as it is
-CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
--- This looks like a valid SCRAM-SHA-256 secret, but it is not
--- so it should be hashed with SCRAM-SHA-256.
-CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
--- These may look like valid MD5 secrets, but they are not, so they
--- should be hashed with SCRAM-SHA-256.
--- trailing garbage at the end
-CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
--- invalid length
-CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
+ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- Neon does not support encrypted passwords, use unencrypted instead
-- already encrypted with MD5, use as it is
+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
+-- Neon does not support encrypted passwords, use unencrypted instead
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- This looks like a valid SCRAM-SHA-256 secret, but it is not
+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- so it should be hashed with SCRAM-SHA-256.
+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-- These may look like valid MD5 secrets, but they are not, so they
-- should be hashed with SCRAM-SHA-256.
-- trailing garbage at the end
CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-- invalid length
CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-- Changing the SCRAM iteration count
SET scram_iterations = 1024;
CREATE ROLE regress_passwd9 PASSWORD 'alterediterationcount';
@@ -81,11 +73,11 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
@@ -81,63 +87,67 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
ORDER BY rolname, rolpassword;
rolname | rolpassword_masked
-----------------+---------------------------------------------------
- regress_passwd1 | md5cd3578025fe2c3d7ed1b9a9b26238b70
- regress_passwd2 | md5dfa155cadd5f4ad57860162f3fab9cdb
+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
+ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
+ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd5 | md5e73a4b11df52a6068f8b39f90be36023
+ regress_passwd5 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd9 | SCRAM-SHA-256$1024:<salt>$<storedkey>:<serverkey>
@@ -95,23 +87,20 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
-(9 rows)
+(5 rows)
-- An empty password is not allowed, in any form
CREATE ROLE regress_passwd_empty PASSWORD '';
NOTICE: empty string is not a valid password, clearing password
@@ -1080,37 +1082,56 @@ index 8475231735..0653946337 100644
-(1 row)
+(0 rows)
--- Test with invalid stored and server keys.
-- Test with invalid stored and server keys.
---
--
--- The first is valid, to act as a control. The others have too long
-- The first is valid, to act as a control. The others have too long
--- stored/server keys. They will be re-hashed.
-- stored/server keys. They will be re-hashed.
-CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
+-- Neon does not support encrypted passwords, use unencrypted instead
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- Check that the invalid secrets were re-hashed. A re-hashed secret
-- should not contain the original salt.
SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed
@@ -120,7 +109,7 @@ SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassw
FROM pg_authid
WHERE rolname LIKE 'regress_passwd_sha_len%'
ORDER BY rolname;
rolname | is_rolpassword_rehashed
- rolname | is_rolpassword_rehashed
-------------------------+-------------------------
--------------------------+-------------------------
- regress_passwd_sha_len0 | f
+ regress_passwd_sha_len0 | t
- regress_passwd_sha_len1 | t
regress_passwd_sha_len1 | t
- regress_passwd_sha_len2 | t
regress_passwd_sha_len2 | t
-(3 rows)
(3 rows)
+ rolname | is_rolpassword_rehashed
@@ -135,6 +124,7 @@ DROP ROLE regress_passwd7;
+---------+-------------------------
+(0 rows)
DROP ROLE regress_passwd1;
DROP ROLE regress_passwd2;
DROP ROLE regress_passwd3;
DROP ROLE regress_passwd4;
DROP ROLE regress_passwd5;
+ERROR: role "regress_passwd5" does not exist
DROP ROLE regress_passwd6;
+ERROR: role "regress_passwd6" does not exist
DROP ROLE regress_passwd7;
+ERROR: role "regress_passwd7" does not exist
DROP ROLE regress_passwd8;
+ERROR: role "regress_passwd8" does not exist
DROP ROLE regress_passwd9;
DROP ROLE regress_passwd_empty;
+ERROR: role "regress_passwd_empty" does not exist
DROP ROLE regress_passwd_sha_len0;
+ERROR: role "regress_passwd_sha_len0" does not exist
DROP ROLE regress_passwd_sha_len1;
+ERROR: role "regress_passwd_sha_len1" does not exist
DROP ROLE regress_passwd_sha_len2;
+ERROR: role "regress_passwd_sha_len2" does not exist
-- all entries should have been removed
SELECT rolname, rolpassword
FROM pg_authid
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index 5b9dba7b32..cc408dad42 100644
--- a/src/test/regress/expected/privileges.out
@@ -3173,7 +3194,7 @@ index 1a6c61f49d..1c31ac6a53 100644
-- Test generic object addressing/identification functions
CREATE SCHEMA addr_nsp;
diff --git a/src/test/regress/sql/password.sql b/src/test/regress/sql/password.sql
index 53e86b0b6c..0303fdfe96 100644
index 53e86b0b6c..f07cf1ec54 100644
--- a/src/test/regress/sql/password.sql
+++ b/src/test/regress/sql/password.sql
@@ -10,11 +10,11 @@ SET password_encryption = 'scram-sha-256'; -- ok
@@ -3192,59 +3213,23 @@ index 53e86b0b6c..0303fdfe96 100644
-- check list of created entries
--
@@ -42,26 +42,18 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
@@ -42,14 +42,14 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
SET password_encryption = 'md5';
-- encrypt with MD5
-ALTER ROLE regress_passwd2 PASSWORD 'foo';
--- already encrypted, use as they are
-ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
-ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
+ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- already encrypted, use as they are
ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
SET password_encryption = 'scram-sha-256';
-- create SCRAM secret
-ALTER ROLE regress_passwd4 PASSWORD 'foo';
--- already encrypted with MD5, use as it is
-CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
+ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- Neon does not support encrypted passwords, use unencrypted instead
-- already encrypted with MD5, use as it is
+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
--- This looks like a valid SCRAM-SHA-256 secret, but it is not
--- so it should be hashed with SCRAM-SHA-256.
-CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
--- These may look like valid MD5 secrets, but they are not, so they
--- should be hashed with SCRAM-SHA-256.
--- trailing garbage at the end
-CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
--- invalid length
-CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- Changing the SCRAM iteration count
SET scram_iterations = 1024;
@@ -78,13 +70,10 @@ ALTER ROLE regress_passwd_empty PASSWORD 'md585939a5ce845f1a1b620742e3c659e0a';
ALTER ROLE regress_passwd_empty PASSWORD 'SCRAM-SHA-256$4096:hpFyHTUsSWcR7O9P$LgZFIt6Oqdo27ZFKbZ2nV+vtnYM995pDh9ca6WSi120=:qVV5NeluNfUPkwm7Vqat25RjSPLkGeoZBQs6wVv+um4=';
SELECT rolpassword FROM pg_authid WHERE rolname='regress_passwd_empty';
--- Test with invalid stored and server keys.
---
--- The first is valid, to act as a control. The others have too long
--- stored/server keys. They will be re-hashed.
-CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- Check that the invalid secrets were re-hashed. A re-hashed secret
-- should not contain the original salt.
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index 249df17a58..b258e7f26a 100644
--- a/src/test/regress/sql/privileges.sql

File diff suppressed because it is too large.

@@ -246,48 +246,47 @@ fn try_spec_from_cli(
let compute_id = matches.get_one::<String>("compute-id"); let compute_id = matches.get_one::<String>("compute-id");
let control_plane_uri = matches.get_one::<String>("control-plane-uri"); let control_plane_uri = matches.get_one::<String>("control-plane-uri");
// First, try to get cluster spec from the cli argument let spec;
if let Some(spec_json) = spec_json { let mut live_config_allowed = false;
info!("got spec from cli argument {}", spec_json); match spec_json {
return Ok(CliSpecParams { // First, try to get cluster spec from the cli argument
spec: Some(serde_json::from_str(spec_json)?), Some(json) => {
live_config_allowed: false, info!("got spec from cli argument {}", json);
}); spec = Some(serde_json::from_str(json)?);
}
// Second, try to read it from the file if path is provided
if let Some(spec_path) = spec_path {
let file = File::open(Path::new(spec_path))?;
return Ok(CliSpecParams {
spec: Some(serde_json::from_reader(file)?),
live_config_allowed: true,
});
}
let Some(compute_id) = compute_id else {
panic!(
"compute spec should be provided by one of the following ways: \
--spec OR --spec-path OR --control-plane-uri and --compute-id"
);
};
let Some(control_plane_uri) = control_plane_uri else {
panic!("must specify both --control-plane-uri and --compute-id or none");
};
match get_spec_from_control_plane(control_plane_uri, compute_id) {
Ok(spec) => Ok(CliSpecParams {
spec,
live_config_allowed: true,
}),
Err(e) => {
error!(
"cannot get response from control plane: {}\n\
neither spec nor confirmation that compute is in the Empty state was received",
e
);
Err(e)
} }
} None => {
// Second, try to read it from the file if path is provided
if let Some(sp) = spec_path {
let path = Path::new(sp);
let file = File::open(path)?;
spec = Some(serde_json::from_reader(file)?);
live_config_allowed = true;
} else if let Some(id) = compute_id {
if let Some(cp_base) = control_plane_uri {
live_config_allowed = true;
spec = match get_spec_from_control_plane(cp_base, id) {
Ok(s) => s,
Err(e) => {
error!("cannot get response from control plane: {}", e);
panic!("neither spec nor confirmation that compute is in the Empty state was received");
}
};
} else {
panic!("must specify both --control-plane-uri and --compute-id or none");
}
} else {
panic!(
"compute spec should be provided by one of the following ways: \
--spec OR --spec-path OR --control-plane-uri and --compute-id"
);
}
}
};
Ok(CliSpecParams {
spec,
live_config_allowed,
})
} }
struct CliSpecParams { struct CliSpecParams {
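Aside (not part of the diff): both versions above resolve the spec from the same three sources with the same precedence and the same live_config_allowed outcomes. A hedged sketch of the invocations; the binary name and values are assumptions, the flag names come from the code above:
# compute_ctl --spec "$(cat spec.json)"                          # inline JSON; live_config_allowed = false
# compute_ctl --spec-path ./spec.json                            # spec file; live_config_allowed = true
# compute_ctl --control-plane-uri "$CP_URI" --compute-id "$ID"   # fetched from control plane; live_config_allowed = true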


@@ -537,14 +537,12 @@ components:
properties:
extname:
type: string
version:
versions:
type: string
type: array
items:
type: string
n_databases:
type: integer
owned_by_superuser:
type: integer
SetRoleGrantsRequest:
type: object


@@ -1,6 +1,7 @@
use compute_api::responses::{InstalledExtension, InstalledExtensions};
use metrics::proto::MetricFamily;
use std::collections::HashMap;
use std::collections::HashSet;
use anyhow::Result;
use postgres::{Client, NoTls};
@@ -37,77 +38,61 @@ fn list_dbs(client: &mut Client) -> Result<Vec<String>> {
/// Connect to every database (see list_dbs above) and get the list of installed extensions.
///
/// Same extension can be installed in multiple databases with different versions,
/// so we report a separate metric (number of databases where it is installed)
/// for each extension version.
pub fn get_installed_extensions(mut conf: postgres::config::Config) -> Result<InstalledExtensions> {
conf.application_name("compute_ctl:get_installed_extensions");
let mut client = conf.connect(NoTls)?;
let databases: Vec<String> = list_dbs(&mut client)?;
let mut extensions_map: HashMap<(String, String, String), InstalledExtension> = HashMap::new();
for db in databases.iter() {
conf.dbname(db);
let mut db_client = conf.connect(NoTls)?;
let extensions: Vec<(String, String, i32)> = db_client
.query(
"SELECT extname, extversion, extowner::integer FROM pg_catalog.pg_extension",
&[],
)?
.iter()
.map(|row| {
(
row.get("extname"),
row.get("extversion"),
row.get("extowner"),
)
})
.collect();
for (extname, v, extowner) in extensions.iter() {
let version = v.to_string();
// check if the extension is owned by superuser
// 10 is the oid of superuser
let owned_by_superuser = if *extowner == 10 { "1" } else { "0" };
extensions_map
.entry((
extname.to_string(),
version.clone(),
owned_by_superuser.to_string(),
))
.and_modify(|e| {
// count the number of databases where the extension is installed
e.n_databases += 1;
})
.or_insert(InstalledExtension {
extname: extname.to_string(),
version: version.clone(),
n_databases: 1,
owned_by_superuser: owned_by_superuser.to_string(),
});
}
}
for (key, ext) in extensions_map.iter() {
let (extname, version, owned_by_superuser) = key;
let n_databases = ext.n_databases as u64;
INSTALLED_EXTENSIONS
.with_label_values(&[extname, version, owned_by_superuser])
.set(n_databases);
}
Ok(InstalledExtensions {
extensions: extensions_map.into_values().collect(),
})
}
/// Same extension can be installed in multiple databases with different versions,
/// we only keep the highest and lowest version across all databases.
pub fn get_installed_extensions(mut conf: postgres::config::Config) -> Result<InstalledExtensions> {
conf.application_name("compute_ctl:get_installed_extensions");
let mut client = conf.connect(NoTls)?;
let databases: Vec<String> = list_dbs(&mut client)?;
let mut extensions_map: HashMap<String, InstalledExtension> = HashMap::new();
for db in databases.iter() {
conf.dbname(db);
let mut db_client = conf.connect(NoTls)?;
let extensions: Vec<(String, String)> = db_client
.query(
"SELECT extname, extversion FROM pg_catalog.pg_extension;",
&[],
)?
.iter()
.map(|row| (row.get("extname"), row.get("extversion")))
.collect();
for (extname, v) in extensions.iter() {
let version = v.to_string();
// increment the number of databases where the version of extension is installed
INSTALLED_EXTENSIONS
.with_label_values(&[extname, &version])
.inc();
extensions_map
.entry(extname.to_string())
.and_modify(|e| {
e.versions.insert(version.clone());
// count the number of databases where the extension is installed
e.n_databases += 1;
})
.or_insert(InstalledExtension {
extname: extname.to_string(),
versions: HashSet::from([version.clone()]),
n_databases: 1,
});
}
}
let res = InstalledExtensions {
extensions: extensions_map.into_values().collect(),
};
Ok(res)
}
static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
register_uint_gauge_vec!(
"compute_installed_extensions",
"Number of databases where the version of extension is installed",
&["extension_name", "version", "owned_by_superuser"]
&["extension_name", "version"]
)
.expect("failed to define a metric")
});
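Aside (not part of the diff): a sketch of the exposition difference between the two variants above, with hypothetical extension data; the metric name and label sets are taken from the code, everything else is assumed:
# first variant, gauge set once per map entry:
#   compute_installed_extensions{extension_name="plpgsql",version="1.0",owned_by_superuser="1"} 3
# second variant, gauge incremented once per database:
#   compute_installed_extensions{extension_name="plpgsql",version="1.0"} 3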


@@ -274,7 +274,6 @@ fn fill_remote_storage_secrets_vars(mut cmd: &mut Command) -> &mut Command {
for env_key in [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_SESSION_TOKEN",
"AWS_PROFILE",
// HOME is needed in combination with `AWS_PROFILE` to pick up the SSO sessions.
"HOME",


@@ -810,7 +810,7 @@ impl Endpoint {
}
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(120))
.timeout(Duration::from_secs(30))
.build()
.unwrap();
let response = client


@@ -435,7 +435,7 @@ impl PageServerNode {
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let config = Self::parse_config(settings)?; let config = Self::parse_config(settings)?;
self.http_client self.http_client
.set_tenant_config(&models::TenantConfigRequest { tenant_id, config }) .tenant_config(&models::TenantConfigRequest { tenant_id, config })
.await?; .await?;
Ok(()) Ok(())

View File

@@ -9,8 +9,8 @@ use pageserver_api::{
}, },
models::{ models::{
EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary, EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary,
ShardParameters, TenantConfig, TenantConfigPatchRequest, TenantConfigRequest, ShardParameters, TenantConfig, TenantConfigRequest, TenantShardSplitRequest,
TenantShardSplitRequest, TenantShardSplitResponse, TenantShardSplitResponse,
}, },
shard::{ShardStripeSize, TenantShardId}, shard::{ShardStripeSize, TenantShardId},
}; };
@@ -116,19 +116,9 @@ enum Command {
#[arg(long)] #[arg(long)]
tenant_shard_id: TenantShardId, tenant_shard_id: TenantShardId,
}, },
/// Set the pageserver tenant configuration of a tenant: this is the configuration structure /// Modify the pageserver tenant configuration of a tenant: this is the configuration structure
/// that is passed through to pageservers, and does not affect storage controller behavior. /// that is passed through to pageservers, and does not affect storage controller behavior.
/// Any previous tenant configs are overwritten. TenantConfig {
SetTenantConfig {
#[arg(long)]
tenant_id: TenantId,
#[arg(long)]
config: String,
},
/// Patch the pageserver tenant configuration of a tenant. Any fields with null values in the
/// provided JSON are unset from the tenant config and all fields with non-null values are set.
/// Unspecified fields are not changed.
PatchTenantConfig {
#[arg(long)] #[arg(long)]
tenant_id: TenantId, tenant_id: TenantId,
#[arg(long)] #[arg(long)]
@@ -559,21 +549,11 @@ async fn main() -> anyhow::Result<()> {
) )
.await?; .await?;
} }
Command::SetTenantConfig { tenant_id, config } => { Command::TenantConfig { tenant_id, config } => {
let tenant_conf = serde_json::from_str(&config)?; let tenant_conf = serde_json::from_str(&config)?;
vps_client vps_client
.set_tenant_config(&TenantConfigRequest { .tenant_config(&TenantConfigRequest {
tenant_id,
config: tenant_conf,
})
.await?;
}
Command::PatchTenantConfig { tenant_id, config } => {
let tenant_conf = serde_json::from_str(&config)?;
vps_client
.patch_tenant_config(&TenantConfigPatchRequest {
tenant_id, tenant_id,
config: tenant_conf, config: tenant_conf,
}) })
@@ -756,7 +736,7 @@ async fn main() -> anyhow::Result<()> {
threshold, threshold,
} => { } => {
vps_client vps_client
.set_tenant_config(&TenantConfigRequest { .tenant_config(&TenantConfigRequest {
tenant_id, tenant_id,
config: TenantConfig { config: TenantConfig {
eviction_policy: Some(EvictionPolicy::LayerAccessThreshold( eviction_policy: Some(EvictionPolicy::LayerAccessThreshold(

View File

@@ -42,7 +42,6 @@ allow = [
"MPL-2.0", "MPL-2.0",
"OpenSSL", "OpenSSL",
"Unicode-DFS-2016", "Unicode-DFS-2016",
"Unicode-3.0",
] ]
confidence-threshold = 0.8 confidence-threshold = 0.8
exceptions = [ exceptions = [

View File

@@ -132,6 +132,11 @@
"name": "cron.database", "name": "cron.database",
"value": "postgres", "value": "postgres",
"vartype": "string" "vartype": "string"
},
{
"name": "session_preload_libraries",
"value": "anon",
"vartype": "string"
} }
] ]
}, },

View File

@@ -35,11 +35,11 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
echo "clean up containers if exists" echo "clean up containers if exists"
cleanup cleanup
PG_TEST_VERSION=$((pg_version < 16 ? 16 : pg_version)) PG_TEST_VERSION=$((pg_version < 16 ? 16 : pg_version))
# The support of pg_anon has not yet been added to PG17, so we have to add the corresponding option for the other PG versions # The support of pg_anon has not yet been added to PG17, so we have to remove the corresponding option
if [ "${pg_version}" -ne 17 ]; then if [ $pg_version -eq 17 ]; then
SPEC_PATH="compute_wrapper/var/db/postgres/specs" SPEC_PATH="compute_wrapper/var/db/postgres/specs"
mv $SPEC_PATH/spec.json $SPEC_PATH/spec.bak mv $SPEC_PATH/spec.json $SPEC_PATH/spec.bak
jq '.cluster.settings += [{"name": "session_preload_libraries","value": "anon","vartype": "string"}]' "${SPEC_PATH}/spec.bak" > "${SPEC_PATH}/spec.json" jq 'del(.cluster.settings[] | select (.name == "session_preload_libraries"))' $SPEC_PATH/spec.bak > $SPEC_PATH/spec.json
fi fi
PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d
@@ -106,8 +106,8 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
fi fi
fi fi
cleanup cleanup
# Restore the original spec.json # The support of pg_anon has not yet been added to PG17, so we have to remove the corresponding option
if [ "$pg_version" -ne 17 ]; then if [ $pg_version -eq 17 ]; then
mv "$SPEC_PATH/spec.bak" "$SPEC_PATH/spec.json" mv $SPEC_PATH/spec.bak $SPEC_PATH/spec.json
fi fi
done done

View File

@@ -1,5 +1,6 @@
//! Structs representing the JSON formats used in the compute_ctl's HTTP API. //! Structs representing the JSON formats used in the compute_ctl's HTTP API.
use std::collections::HashSet;
use std::fmt::Display; use std::fmt::Display;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
@@ -162,9 +163,8 @@ pub enum ControlPlaneComputeStatus {
#[derive(Clone, Debug, Default, Serialize)] #[derive(Clone, Debug, Default, Serialize)]
pub struct InstalledExtension { pub struct InstalledExtension {
pub extname: String, pub extname: String,
pub version: String, pub versions: HashSet<String>,
pub n_databases: u32, // Number of databases using this extension pub n_databases: u32, // Number of databases using this extension
pub owned_by_superuser: String,
} }
#[derive(Clone, Debug, Default, Serialize)] #[derive(Clone, Debug, Default, Serialize)]

View File

@@ -91,7 +91,7 @@ impl Timing {
/// Return true if there is a ready event. /// Return true if there is a ready event.
fn is_event_ready(&self, queue: &mut BinaryHeap<Pending>) -> bool { fn is_event_ready(&self, queue: &mut BinaryHeap<Pending>) -> bool {
queue.peek().is_some_and(|x| x.time <= self.now()) queue.peek().map_or(false, |x| x.time <= self.now())
} }
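The two predicates are interchangeable: `is_some_and` (stable since Rust 1.70) is the idiomatic spelling of `map_or(false, ..)`. A quick check:

```rust
fn main() {
    let head: Option<u32> = Some(3); // earliest pending event time, if any
    let now = 5;
    // Both forms read as "there is a pending event and it is already due".
    assert_eq!(
        head.is_some_and(|t| t <= now),
        head.map_or(false, |t| t <= now),
    );
}
```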
/// Clear all pending events. /// Clear all pending events.

View File

@@ -75,7 +75,7 @@ pub struct TenantPolicyRequest {
pub scheduling: Option<ShardSchedulingPolicy>, pub scheduling: Option<ShardSchedulingPolicy>,
} }
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)] #[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
pub struct AvailabilityZone(pub String); pub struct AvailabilityZone(pub String);
impl Display for AvailabilityZone { impl Display for AvailabilityZone {
@@ -245,17 +245,6 @@ impl From<NodeAvailability> for NodeAvailabilityWrapper {
} }
} }
/// Scheduling policy enables us to selectively disable some automatic actions that the
/// controller performs on a tenant shard. This is only set to a non-default value by
/// human intervention, and it is reset to the default value (Active) when the tenant's
/// placement policy is modified away from Attached.
///
/// The typical use of a non-Active scheduling policy is one of:
/// - Pinning a shard to a node (i.e. migrating it there & setting a non-Active scheduling policy)
/// - Working around a bug (e.g. if something is flapping and we need to stop it until the bug is fixed)
///
/// If you're not sure which policy to use to pin a shard to its current location, you probably
/// want Pause.
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)] #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum ShardSchedulingPolicy { pub enum ShardSchedulingPolicy {
// Normal mode: the tenant's scheduled locations may be updated at will, including // Normal mode: the tenant's scheduled locations may be updated at will, including

View File

@@ -24,7 +24,7 @@ pub struct Key {
/// When working with large numbers of Keys in-memory, it is more efficient to handle them as i128 than as /// When working with large numbers of Keys in-memory, it is more efficient to handle them as i128 than as
/// a struct of fields. /// a struct of fields.
#[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, Debug)] #[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)]
pub struct CompactKey(i128); pub struct CompactKey(i128);
/// The storage key size. /// The storage key size.
@@ -565,10 +565,6 @@ impl Key {
&& self.field5 == 0 && self.field5 == 0
&& self.field6 == u32::MAX && self.field6 == u32::MAX
} }
pub fn is_slru_dir_key(&self) -> bool {
slru_dir_kind(self).is_some()
}
} }
#[inline(always)] #[inline(always)]

View File

@@ -17,7 +17,7 @@ use std::{
use byteorder::{BigEndian, ReadBytesExt}; use byteorder::{BigEndian, ReadBytesExt};
use postgres_ffi::BLCKSZ; use postgres_ffi::BLCKSZ;
use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::{Deserialize, Serialize};
use serde_with::serde_as; use serde_with::serde_as;
use utils::{ use utils::{
completion, completion,
@@ -325,115 +325,6 @@ impl Default for ShardParameters {
} }
} }
#[derive(Debug, Default, Clone, Eq, PartialEq)]
pub enum FieldPatch<T> {
Upsert(T),
Remove,
#[default]
Noop,
}
impl<T> FieldPatch<T> {
fn is_noop(&self) -> bool {
matches!(self, FieldPatch::Noop)
}
pub fn apply(self, target: &mut Option<T>) {
match self {
Self::Upsert(v) => *target = Some(v),
Self::Remove => *target = None,
Self::Noop => {}
}
}
pub fn map<U, E, F: FnOnce(T) -> Result<U, E>>(self, map: F) -> Result<FieldPatch<U>, E> {
match self {
Self::Upsert(v) => Ok(FieldPatch::<U>::Upsert(map(v)?)),
Self::Remove => Ok(FieldPatch::<U>::Remove),
Self::Noop => Ok(FieldPatch::<U>::Noop),
}
}
}
impl<'de, T: Deserialize<'de>> Deserialize<'de> for FieldPatch<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Option::deserialize(deserializer).map(|opt| match opt {
None => FieldPatch::Remove,
Some(val) => FieldPatch::Upsert(val),
})
}
}
impl<T: Serialize> Serialize for FieldPatch<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
FieldPatch::Upsert(val) => serializer.serialize_some(val),
FieldPatch::Remove => serializer.serialize_none(),
FieldPatch::Noop => unreachable!(),
}
}
}
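For reference, the tri-state semantics this type encodes: a field that is absent from the JSON stays `Noop`, an explicit `null` becomes `Remove`, and a value becomes `Upsert`. A minimal sketch demonstrating all three (assuming `serde` and `serde_json` as dependencies; `Patch` is an illustrative stand-in for `TenantConfigPatch`):

```rust
use serde::{Deserialize, Deserializer};

#[derive(Debug, Default, PartialEq)]
enum FieldPatch<T> {
    Upsert(T),
    Remove,
    #[default]
    Noop,
}

impl<'de, T: Deserialize<'de>> Deserialize<'de> for FieldPatch<T> {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // An explicit `null` arrives as None, a value as Some; a missing
        // field never reaches this impl and falls back to Noop via Default.
        Option::deserialize(deserializer).map(|opt| match opt {
            None => FieldPatch::Remove,
            Some(val) => FieldPatch::Upsert(val),
        })
    }
}

#[derive(Debug, Default, Deserialize)]
#[serde(default)]
struct Patch {
    gc_horizon: FieldPatch<u64>,
    checkpoint_distance: FieldPatch<u64>,
}

fn main() {
    let p: Patch =
        serde_json::from_str(r#"{"gc_horizon": null, "checkpoint_distance": 42}"#).unwrap();
    assert_eq!(p.gc_horizon, FieldPatch::Remove); // explicit null
    assert_eq!(p.checkpoint_distance, FieldPatch::Upsert(42)); // value

    let q: Patch = serde_json::from_str("{}").unwrap();
    assert_eq!(q.gc_horizon, FieldPatch::Noop); // missing field
}
```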
#[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
#[serde(default)]
pub struct TenantConfigPatch {
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub checkpoint_distance: FieldPatch<u64>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub checkpoint_timeout: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub compaction_target_size: FieldPatch<u64>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub compaction_period: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub compaction_threshold: FieldPatch<usize>,
// defer parsing compaction_algorithm, like eviction_policy
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub compaction_algorithm: FieldPatch<CompactionAlgorithmSettings>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub gc_horizon: FieldPatch<u64>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub gc_period: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub image_creation_threshold: FieldPatch<usize>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub pitr_interval: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub walreceiver_connect_timeout: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub lagging_wal_timeout: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub max_lsn_wal_lag: FieldPatch<NonZeroU64>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub eviction_policy: FieldPatch<EvictionPolicy>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub min_resident_size_override: FieldPatch<u64>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub evictions_low_residence_duration_metric_threshold: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub heatmap_period: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub lazy_slru_download: FieldPatch<bool>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub timeline_get_throttle: FieldPatch<ThrottleConfig>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub image_layer_creation_check_threshold: FieldPatch<u8>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub lsn_lease_length: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub lsn_lease_length_for_ts: FieldPatch<String>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub timeline_offloading: FieldPatch<bool>,
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
pub wal_receiver_protocol_override: FieldPatch<PostgresClientProtocol>,
}
/// An alternative representation of `pageserver::tenant::TenantConf` with /// An alternative representation of `pageserver::tenant::TenantConf` with
/// simpler types. /// simpler types.
#[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)] #[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
@@ -465,107 +356,6 @@ pub struct TenantConfig {
pub wal_receiver_protocol_override: Option<PostgresClientProtocol>, pub wal_receiver_protocol_override: Option<PostgresClientProtocol>,
} }
impl TenantConfig {
pub fn apply_patch(self, patch: TenantConfigPatch) -> TenantConfig {
let Self {
mut checkpoint_distance,
mut checkpoint_timeout,
mut compaction_target_size,
mut compaction_period,
mut compaction_threshold,
mut compaction_algorithm,
mut gc_horizon,
mut gc_period,
mut image_creation_threshold,
mut pitr_interval,
mut walreceiver_connect_timeout,
mut lagging_wal_timeout,
mut max_lsn_wal_lag,
mut eviction_policy,
mut min_resident_size_override,
mut evictions_low_residence_duration_metric_threshold,
mut heatmap_period,
mut lazy_slru_download,
mut timeline_get_throttle,
mut image_layer_creation_check_threshold,
mut lsn_lease_length,
mut lsn_lease_length_for_ts,
mut timeline_offloading,
mut wal_receiver_protocol_override,
} = self;
patch.checkpoint_distance.apply(&mut checkpoint_distance);
patch.checkpoint_timeout.apply(&mut checkpoint_timeout);
patch
.compaction_target_size
.apply(&mut compaction_target_size);
patch.compaction_period.apply(&mut compaction_period);
patch.compaction_threshold.apply(&mut compaction_threshold);
patch.compaction_algorithm.apply(&mut compaction_algorithm);
patch.gc_horizon.apply(&mut gc_horizon);
patch.gc_period.apply(&mut gc_period);
patch
.image_creation_threshold
.apply(&mut image_creation_threshold);
patch.pitr_interval.apply(&mut pitr_interval);
patch
.walreceiver_connect_timeout
.apply(&mut walreceiver_connect_timeout);
patch.lagging_wal_timeout.apply(&mut lagging_wal_timeout);
patch.max_lsn_wal_lag.apply(&mut max_lsn_wal_lag);
patch.eviction_policy.apply(&mut eviction_policy);
patch
.min_resident_size_override
.apply(&mut min_resident_size_override);
patch
.evictions_low_residence_duration_metric_threshold
.apply(&mut evictions_low_residence_duration_metric_threshold);
patch.heatmap_period.apply(&mut heatmap_period);
patch.lazy_slru_download.apply(&mut lazy_slru_download);
patch
.timeline_get_throttle
.apply(&mut timeline_get_throttle);
patch
.image_layer_creation_check_threshold
.apply(&mut image_layer_creation_check_threshold);
patch.lsn_lease_length.apply(&mut lsn_lease_length);
patch
.lsn_lease_length_for_ts
.apply(&mut lsn_lease_length_for_ts);
patch.timeline_offloading.apply(&mut timeline_offloading);
patch
.wal_receiver_protocol_override
.apply(&mut wal_receiver_protocol_override);
Self {
checkpoint_distance,
checkpoint_timeout,
compaction_target_size,
compaction_period,
compaction_threshold,
compaction_algorithm,
gc_horizon,
gc_period,
image_creation_threshold,
pitr_interval,
walreceiver_connect_timeout,
lagging_wal_timeout,
max_lsn_wal_lag,
eviction_policy,
min_resident_size_override,
evictions_low_residence_duration_metric_threshold,
heatmap_period,
lazy_slru_download,
timeline_get_throttle,
image_layer_creation_check_threshold,
lsn_lease_length,
lsn_lease_length_for_ts,
timeline_offloading,
wal_receiver_protocol_override,
}
}
}
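The full destructuring of `Self` in `apply_patch` is a deliberate choice: because every field must be named in the `let Self { .. }` pattern, adding a field to the config struct breaks this function at compile time instead of letting the new field be silently dropped. A reduced illustration of the same trick (illustrative names):

```rust
#[derive(Debug, PartialEq)]
struct Conf {
    gc_horizon: Option<u64>,
    pitr_interval: Option<String>,
}

impl Conf {
    // Adding a field to Conf makes this pattern non-exhaustive, so the
    // compiler forces the patch logic to be updated alongside the struct.
    fn apply_patch(self, gc_horizon_patch: Option<Option<u64>>) -> Conf {
        let Conf { mut gc_horizon, pitr_interval } = self;
        if let Some(p) = gc_horizon_patch {
            gc_horizon = p; // outer Some = patch present, inner value = new setting
        }
        Conf { gc_horizon, pitr_interval }
    }
}

fn main() {
    let base = Conf { gc_horizon: Some(100), pitr_interval: None };
    let patched = base.apply_patch(Some(None)); // patch present, unsets the field
    assert_eq!(patched.gc_horizon, None);
}
```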
/// The policy for the aux file storage. /// The policy for the aux file storage.
/// ///
/// It can be switched through `switch_aux_file_policy` tenant config. /// It can be switched through `switch_aux_file_policy` tenant config.
@@ -896,14 +686,6 @@ impl TenantConfigRequest {
} }
} }
#[derive(Serialize, Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct TenantConfigPatchRequest {
pub tenant_id: TenantId,
#[serde(flatten)]
pub config: TenantConfigPatch, // as we have a flattened field, we should reject all unknown fields in it
}
/// See [`TenantState::attachment_status`] and the OpenAPI docs for context. /// See [`TenantState::attachment_status`] and the OpenAPI docs for context.
#[derive(Serialize, Deserialize, Clone)] #[derive(Serialize, Deserialize, Clone)]
#[serde(tag = "slug", content = "data", rename_all = "snake_case")] #[serde(tag = "slug", content = "data", rename_all = "snake_case")]
@@ -1917,45 +1699,4 @@ mod tests {
); );
} }
} }
#[test]
fn test_tenant_config_patch_request_serde() {
let patch_request = TenantConfigPatchRequest {
tenant_id: TenantId::from_str("17c6d121946a61e5ab0fe5a2fd4d8215").unwrap(),
config: TenantConfigPatch {
checkpoint_distance: FieldPatch::Upsert(42),
gc_horizon: FieldPatch::Remove,
compaction_threshold: FieldPatch::Noop,
..TenantConfigPatch::default()
},
};
let json = serde_json::to_string(&patch_request).unwrap();
let expected = r#"{"tenant_id":"17c6d121946a61e5ab0fe5a2fd4d8215","checkpoint_distance":42,"gc_horizon":null}"#;
assert_eq!(json, expected);
let decoded: TenantConfigPatchRequest = serde_json::from_str(&json).unwrap();
assert_eq!(decoded.tenant_id, patch_request.tenant_id);
assert_eq!(decoded.config, patch_request.config);
// Now apply the patch to a config to demonstrate semantics
let base = TenantConfig {
checkpoint_distance: Some(28),
gc_horizon: Some(100),
compaction_target_size: Some(1024),
..Default::default()
};
let expected = TenantConfig {
checkpoint_distance: Some(42),
gc_horizon: None,
..base.clone()
};
let patched = base.apply_patch(decoded.config);
assert_eq!(patched, expected);
}
} }

View File

@@ -173,11 +173,7 @@ impl ShardIdentity {
/// Return true if the key should be stored on all shards, not just one. /// Return true if the key should be stored on all shards, not just one.
pub fn is_key_global(&self, key: &Key) -> bool { pub fn is_key_global(&self, key: &Key) -> bool {
if key.is_slru_block_key() if key.is_slru_block_key() || key.is_slru_segment_size_key() || key.is_aux_file_key() {
|| key.is_slru_segment_size_key()
|| key.is_aux_file_key()
|| key.is_slru_dir_key()
{
// Special keys that are only stored on shard 0 // Special keys that are only stored on shard 0
false false
} else if key.is_rel_block_key() { } else if key.is_rel_block_key() {

View File

@@ -9,11 +9,9 @@ regex.workspace = true
bytes.workspace = true bytes.workspace = true
anyhow.workspace = true anyhow.workspace = true
crc32c.workspace = true crc32c.workspace = true
criterion.workspace = true
once_cell.workspace = true once_cell.workspace = true
log.workspace = true log.workspace = true
memoffset.workspace = true memoffset.workspace = true
pprof.workspace = true
thiserror.workspace = true thiserror.workspace = true
serde.workspace = true serde.workspace = true
utils.workspace = true utils.workspace = true
@@ -26,7 +24,3 @@ postgres.workspace = true
[build-dependencies] [build-dependencies]
anyhow.workspace = true anyhow.workspace = true
bindgen.workspace = true bindgen.workspace = true
[[bench]]
name = "waldecoder"
harness = false

View File

@@ -1,26 +0,0 @@
## Benchmarks
To run benchmarks:
```sh
# All benchmarks.
cargo bench --package postgres_ffi
# Specific file.
cargo bench --package postgres_ffi --bench waldecoder
# Specific benchmark.
cargo bench --package postgres_ffi --bench waldecoder complete_record/size=1024
# List available benchmarks.
cargo bench --package postgres_ffi --benches -- --list
# Generate flamegraph profiles using pprof-rs, profiling for 10 seconds.
# Output in target/criterion/*/profile/flamegraph.svg.
cargo bench --package postgres_ffi --bench waldecoder complete_record/size=1024 -- --profile-time 10
```
Additional charts and statistics are available in `target/criterion/report/index.html`.
Benchmarks are automatically compared against the previous run. To compare against other runs, see
`--baseline` and `--save-baseline`.

View File

@@ -1,49 +0,0 @@
use std::ffi::CStr;
use criterion::{criterion_group, criterion_main, Bencher, Criterion};
use postgres_ffi::v17::wal_generator::LogicalMessageGenerator;
use postgres_ffi::v17::waldecoder_handler::WalStreamDecoderHandler;
use postgres_ffi::waldecoder::WalStreamDecoder;
use pprof::criterion::{Output, PProfProfiler};
use utils::lsn::Lsn;
const KB: usize = 1024;
// Register benchmarks with Criterion.
criterion_group!(
name = benches;
config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_complete_record,
);
criterion_main!(benches);
/// Benchmarks WalStreamDecoder::complete_record() for a logical message of varying size.
fn bench_complete_record(c: &mut Criterion) {
let mut g = c.benchmark_group("complete_record");
for size in [64, KB, 8 * KB, 128 * KB] {
// Kind of weird to change the group throughput per benchmark, but it's the only way
// to vary it per benchmark. It works.
g.throughput(criterion::Throughput::Bytes(size as u64));
g.bench_function(format!("size={size}"), |b| run_bench(b, size).unwrap());
}
fn run_bench(b: &mut Bencher, size: usize) -> anyhow::Result<()> {
const PREFIX: &CStr = c"";
let value_size = LogicalMessageGenerator::make_value_size(size, PREFIX);
let value = vec![1; value_size];
let mut decoder = WalStreamDecoder::new(Lsn(0), 170000);
let msg = LogicalMessageGenerator::new(PREFIX, &value)
.next()
.unwrap()
.encode(Lsn(0));
assert_eq!(msg.len(), size);
b.iter(|| {
let msg = msg.clone(); // Bytes::clone() is cheap
decoder.complete_record(msg).unwrap();
});
Ok(())
}
}

View File

@@ -231,22 +231,6 @@ impl LogicalMessageGenerator {
}; };
[&header.encode(), prefix, message].concat().into() [&header.encode(), prefix, message].concat().into()
} }
/// Computes how large a value must be to get a record of the given size. Convenience method to
/// construct records of pre-determined size. Panics if the record size is too small.
pub fn make_value_size(record_size: usize, prefix: &CStr) -> usize {
let xlog_header_size = XLOG_SIZE_OF_XLOG_RECORD;
let lm_header_size = size_of::<XlLogicalMessage>();
let prefix_size = prefix.to_bytes_with_nul().len();
let data_header_size = match record_size - xlog_header_size - 2 {
0..=255 => 2,
256..=258 => panic!("impossible record_size {record_size}"),
259.. => 5,
};
record_size
.checked_sub(xlog_header_size + lm_header_size + prefix_size + data_header_size)
.expect("record_size too small")
}
} }
impl Iterator for LogicalMessageGenerator { impl Iterator for LogicalMessageGenerator {

View File

@@ -81,7 +81,7 @@ fn test_end_of_wal<C: crate::Crafter>(test_name: &str) {
continue; continue;
} }
let mut f = File::options().write(true).open(file.path()).unwrap(); let mut f = File::options().write(true).open(file.path()).unwrap();
static ZEROS: [u8; WAL_SEGMENT_SIZE] = [0u8; WAL_SEGMENT_SIZE]; const ZEROS: [u8; WAL_SEGMENT_SIZE] = [0u8; WAL_SEGMENT_SIZE];
f.write_all( f.write_all(
&ZEROS[0..min( &ZEROS[0..min(
WAL_SEGMENT_SIZE, WAL_SEGMENT_SIZE,

View File

@@ -1,7 +1,7 @@
[package] [package]
name = "postgres-protocol2" name = "postgres-protocol2"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2018"
license = "MIT/Apache-2.0" license = "MIT/Apache-2.0"
[dependencies] [dependencies]

View File

@@ -9,7 +9,8 @@
//! //!
//! This library assumes that the `client_encoding` backend parameter has been //! This library assumes that the `client_encoding` backend parameter has been
//! set to `UTF8`. It will most likely not behave properly if that is not the case. //! set to `UTF8`. It will most likely not behave properly if that is not the case.
#![warn(missing_docs, clippy::all)] #![doc(html_root_url = "https://docs.rs/postgres-protocol/0.6")]
#![warn(missing_docs, rust_2018_idioms, clippy::all)]
use byteorder::{BigEndian, ByteOrder}; use byteorder::{BigEndian, ByteOrder};
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};

View File

@@ -3,6 +3,7 @@
use byteorder::{BigEndian, ByteOrder}; use byteorder::{BigEndian, ByteOrder};
use bytes::{Buf, BufMut, BytesMut}; use bytes::{Buf, BufMut, BytesMut};
use std::convert::TryFrom;
use std::error::Error; use std::error::Error;
use std::io; use std::io;
use std::marker; use std::marker;

View File

@@ -1,7 +1,7 @@
[package] [package]
name = "postgres-types2" name = "postgres-types2"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2018"
license = "MIT/Apache-2.0" license = "MIT/Apache-2.0"
[dependencies] [dependencies]

View File

@@ -2,7 +2,8 @@
//! //!
//! This crate is used by the `tokio-postgres` and `postgres` crates. You normally don't need to depend directly on it //! This crate is used by the `tokio-postgres` and `postgres` crates. You normally don't need to depend directly on it
//! unless you want to define your own `ToSql` or `FromSql` definitions. //! unless you want to define your own `ToSql` or `FromSql` definitions.
#![warn(clippy::all, missing_docs)] #![doc(html_root_url = "https://docs.rs/postgres-types/0.2")]
#![warn(clippy::all, rust_2018_idioms, missing_docs)]
use fallible_iterator::FallibleIterator; use fallible_iterator::FallibleIterator;
use postgres_protocol2::types; use postgres_protocol2::types;

View File

@@ -1,7 +1,7 @@
[package] [package]
name = "tokio-postgres2" name = "tokio-postgres2"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2018"
license = "MIT/Apache-2.0" license = "MIT/Apache-2.0"
[dependencies] [dependencies]

View File

@@ -4,23 +4,18 @@ use crate::config::Host;
use crate::config::SslMode; use crate::config::SslMode;
use crate::connection::{Request, RequestMessages}; use crate::connection::{Request, RequestMessages};
use crate::query::RowStream; use crate::types::{Oid, Type};
use crate::simple_query::SimpleQueryStream;
use crate::types::{Oid, ToSql, Type};
use crate::{ use crate::{
prepare, query, simple_query, slice_iter, CancelToken, Error, ReadyForQueryStatus, Row, simple_query, CancelToken, Error, ReadyForQueryStatus, Statement, Transaction,
SimpleQueryMessage, Statement, ToStatement, Transaction, TransactionBuilder, TransactionBuilder,
}; };
use bytes::BytesMut; use bytes::BytesMut;
use fallible_iterator::FallibleIterator; use fallible_iterator::FallibleIterator;
use futures_util::{future, ready, TryStreamExt}; use futures_util::{future, ready};
use parking_lot::Mutex;
use postgres_protocol2::message::{backend::Message, frontend}; use postgres_protocol2::message::{backend::Message, frontend};
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
use std::sync::Arc;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use tokio::sync::mpsc; use tokio::sync::mpsc;
@@ -55,7 +50,7 @@ impl Responses {
/// A cache of type info and prepared statements for fetching type info /// A cache of type info and prepared statements for fetching type info
/// (corresponding to the queries in the [prepare] module). /// (corresponding to the queries in the [prepare] module).
#[derive(Default)] #[derive(Default)]
struct CachedTypeInfo { pub(crate) struct CachedTypeInfo {
/// A statement for basic information for a type from its /// A statement for basic information for a type from its
/// OID. Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_QUERY) (or its /// OID. Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_QUERY) (or its
/// fallback). /// fallback).
@@ -71,13 +66,45 @@ struct CachedTypeInfo {
/// Cache of types already looked up. /// Cache of types already looked up.
types: HashMap<Oid, Type>, types: HashMap<Oid, Type>,
} }
impl CachedTypeInfo {
pub(crate) fn typeinfo(&mut self) -> Option<&Statement> {
self.typeinfo.as_ref()
}
pub(crate) fn set_typeinfo(&mut self, statement: Statement) -> &Statement {
self.typeinfo.insert(statement)
}
pub(crate) fn typeinfo_composite(&mut self) -> Option<&Statement> {
self.typeinfo_composite.as_ref()
}
pub(crate) fn set_typeinfo_composite(&mut self, statement: Statement) -> &Statement {
self.typeinfo_composite.insert(statement)
}
pub(crate) fn typeinfo_enum(&mut self) -> Option<&Statement> {
self.typeinfo_enum.as_ref()
}
pub(crate) fn set_typeinfo_enum(&mut self, statement: Statement) -> &Statement {
self.typeinfo_enum.insert(statement)
}
pub(crate) fn type_(&mut self, oid: Oid) -> Option<Type> {
self.types.get(&oid).cloned()
}
pub(crate) fn set_type(&mut self, oid: Oid, type_: &Type) {
self.types.insert(oid, type_.clone());
}
}
pub struct InnerClient { pub struct InnerClient {
sender: mpsc::UnboundedSender<Request>, sender: mpsc::UnboundedSender<Request>,
cached_typeinfo: Mutex<CachedTypeInfo>,
/// A buffer to use when writing out postgres commands. /// A buffer to use when writing out postgres commands.
buffer: Mutex<BytesMut>, buffer: BytesMut,
} }
impl InnerClient { impl InnerClient {
@@ -92,47 +119,14 @@ impl InnerClient {
}) })
} }
pub fn typeinfo(&self) -> Option<Statement> {
self.cached_typeinfo.lock().typeinfo.clone()
}
pub fn set_typeinfo(&self, statement: &Statement) {
self.cached_typeinfo.lock().typeinfo = Some(statement.clone());
}
pub fn typeinfo_composite(&self) -> Option<Statement> {
self.cached_typeinfo.lock().typeinfo_composite.clone()
}
pub fn set_typeinfo_composite(&self, statement: &Statement) {
self.cached_typeinfo.lock().typeinfo_composite = Some(statement.clone());
}
pub fn typeinfo_enum(&self) -> Option<Statement> {
self.cached_typeinfo.lock().typeinfo_enum.clone()
}
pub fn set_typeinfo_enum(&self, statement: &Statement) {
self.cached_typeinfo.lock().typeinfo_enum = Some(statement.clone());
}
pub fn type_(&self, oid: Oid) -> Option<Type> {
self.cached_typeinfo.lock().types.get(&oid).cloned()
}
pub fn set_type(&self, oid: Oid, type_: &Type) {
self.cached_typeinfo.lock().types.insert(oid, type_.clone());
}
/// Call the given function with a buffer to be used when writing out /// Call the given function with a buffer to be used when writing out
/// postgres commands. /// postgres commands.
pub fn with_buf<F, R>(&self, f: F) -> R pub fn with_buf<F, R>(&mut self, f: F) -> R
where where
F: FnOnce(&mut BytesMut) -> R, F: FnOnce(&mut BytesMut) -> R,
{ {
let mut buffer = self.buffer.lock(); let r = f(&mut self.buffer);
let r = f(&mut buffer); self.buffer.clear();
buffer.clear();
r r
} }
} }
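Without the `Mutex`, `with_buf` is a plain scoped-scratch-buffer helper: the caller writes into the shared `BytesMut`, detaches what it wrote, and the buffer is cleared for reuse. A standalone sketch of the pattern, including the `split().freeze()` idiom used by callers elsewhere in this diff:

```rust
use bytes::BytesMut;

struct Writer {
    buffer: BytesMut,
}

impl Writer {
    // Hand the caller the scratch buffer, then clear it so the allocation
    // is reused by the next command encoder.
    fn with_buf<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut BytesMut) -> R,
    {
        let r = f(&mut self.buffer);
        self.buffer.clear();
        r
    }
}

fn main() {
    let mut w = Writer { buffer: BytesMut::new() };
    let frozen = w.with_buf(|buf| {
        buf.extend_from_slice(b"ROLLBACK");
        // split() detaches the written bytes first, so the clear() that
        // follows only resets the now-empty scratch buffer.
        buf.split().freeze()
    });
    assert_eq!(&frozen[..], b"ROLLBACK");
    assert!(w.buffer.is_empty());
}
```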
@@ -150,7 +144,8 @@ pub struct SocketConfig {
/// The client is one half of what is returned when a connection is established. Users interact with the database /// The client is one half of what is returned when a connection is established. Users interact with the database
/// through this client object. /// through this client object.
pub struct Client { pub struct Client {
inner: Arc<InnerClient>, pub(crate) inner: InnerClient,
pub(crate) cached_typeinfo: CachedTypeInfo,
socket_config: SocketConfig, socket_config: SocketConfig,
ssl_mode: SslMode, ssl_mode: SslMode,
@@ -167,11 +162,11 @@ impl Client {
secret_key: i32, secret_key: i32,
) -> Client { ) -> Client {
Client { Client {
inner: Arc::new(InnerClient { inner: InnerClient {
sender, sender,
cached_typeinfo: Default::default(),
buffer: Default::default(), buffer: Default::default(),
}), },
cached_typeinfo: Default::default(),
socket_config, socket_config,
ssl_mode, ssl_mode,
@@ -185,161 +180,6 @@ impl Client {
self.process_id self.process_id
} }
pub(crate) fn inner(&self) -> &Arc<InnerClient> {
&self.inner
}
/// Creates a new prepared statement.
///
/// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc),
/// which are set when executed. Prepared statements can only be used with the connection that created them.
pub async fn prepare(&self, query: &str) -> Result<Statement, Error> {
self.prepare_typed(query, &[]).await
}
/// Like `prepare`, but allows the types of query parameters to be explicitly specified.
///
/// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be
/// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`.
pub async fn prepare_typed(
&self,
query: &str,
parameter_types: &[Type],
) -> Result<Statement, Error> {
prepare::prepare(&self.inner, query, parameter_types).await
}
/// Executes a statement, returning a vector of the resulting rows.
///
/// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
/// provided, 1-indexed.
///
/// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
/// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
/// with the `prepare` method.
///
/// # Panics
///
/// Panics if the number of parameters provided does not match the number expected.
pub async fn query<T>(
&self,
statement: &T,
params: &[&(dyn ToSql + Sync)],
) -> Result<Vec<Row>, Error>
where
T: ?Sized + ToStatement,
{
self.query_raw(statement, slice_iter(params))
.await?
.try_collect()
.await
}
/// The maximally flexible version of [`query`].
///
/// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
/// provided, 1-indexed.
///
/// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
/// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
/// with the `prepare` method.
///
/// # Panics
///
/// Panics if the number of parameters provided does not match the number expected.
///
/// [`query`]: #method.query
pub async fn query_raw<'a, T, I>(&self, statement: &T, params: I) -> Result<RowStream, Error>
where
T: ?Sized + ToStatement,
I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
I::IntoIter: ExactSizeIterator,
{
let statement = statement.__convert().into_statement(self).await?;
query::query(&self.inner, statement, params).await
}
/// Pass text directly to the Postgres backend to allow it to sort out typing itself and
/// to save a roundtrip
pub async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
where
S: AsRef<str>,
I: IntoIterator<Item = Option<S>>,
I::IntoIter: ExactSizeIterator,
{
query::query_txt(&self.inner, statement, params).await
}
/// Executes a statement, returning the number of rows modified.
///
/// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
/// provided, 1-indexed.
///
/// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
/// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
/// with the `prepare` method.
///
/// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned.
///
/// # Panics
///
/// Panics if the number of parameters provided does not match the number expected.
pub async fn execute<T>(
&self,
statement: &T,
params: &[&(dyn ToSql + Sync)],
) -> Result<u64, Error>
where
T: ?Sized + ToStatement,
{
self.execute_raw(statement, slice_iter(params)).await
}
/// The maximally flexible version of [`execute`].
///
/// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
/// provided, 1-indexed.
///
/// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
/// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
/// with the `prepare` method.
///
/// # Panics
///
/// Panics if the number of parameters provided does not match the number expected.
///
/// [`execute`]: #method.execute
pub async fn execute_raw<'a, T, I>(&self, statement: &T, params: I) -> Result<u64, Error>
where
T: ?Sized + ToStatement,
I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
I::IntoIter: ExactSizeIterator,
{
let statement = statement.__convert().into_statement(self).await?;
query::execute(self.inner(), statement, params).await
}
/// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows.
///
/// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
/// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings,
/// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a list of the
/// rows, this method returns a list of an enum which indicates either the completion of one of the commands,
/// or a row of data. This preserves the framing between the separate statements in the request.
///
/// # Warning
///
/// Prepared statements should be used for any query which contains user-specified data, as they provide the
/// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
/// them to this method!
pub async fn simple_query(&self, query: &str) -> Result<Vec<SimpleQueryMessage>, Error> {
self.simple_query_raw(query).await?.try_collect().await
}
pub(crate) async fn simple_query_raw(&self, query: &str) -> Result<SimpleQueryStream, Error> {
simple_query::simple_query(self.inner(), query).await
}
/// Executes a sequence of SQL statements using the simple query protocol. /// Executes a sequence of SQL statements using the simple query protocol.
/// ///
/// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
@@ -350,8 +190,8 @@ impl Client {
/// Prepared statements should be used for any query which contains user-specified data, as they provide the /// Prepared statements should be used for any query which contains user-specified data, as they provide the
/// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
/// them to this method! /// them to this method!
pub async fn batch_execute(&self, query: &str) -> Result<ReadyForQueryStatus, Error> { pub async fn batch_execute(&mut self, query: &str) -> Result<ReadyForQueryStatus, Error> {
simple_query::batch_execute(self.inner(), query).await simple_query::batch_execute(&mut self.inner, query).await
} }
/// Begins a new database transaction. /// Begins a new database transaction.
@@ -359,7 +199,7 @@ impl Client {
/// The transaction will roll back by default - use the `commit` method to commit it. /// The transaction will roll back by default - use the `commit` method to commit it.
pub async fn transaction(&mut self) -> Result<Transaction<'_>, Error> { pub async fn transaction(&mut self) -> Result<Transaction<'_>, Error> {
struct RollbackIfNotDone<'me> { struct RollbackIfNotDone<'me> {
client: &'me Client, client: &'me mut Client,
done: bool, done: bool,
} }
@@ -369,13 +209,13 @@ impl Client {
return; return;
} }
let buf = self.client.inner().with_buf(|buf| { let buf = self.client.inner.with_buf(|buf| {
frontend::query("ROLLBACK", buf).unwrap(); frontend::query("ROLLBACK", buf).unwrap();
buf.split().freeze() buf.split().freeze()
}); });
let _ = self let _ = self
.client .client
.inner() .inner
.send(RequestMessages::Single(FrontendMessage::Raw(buf))); .send(RequestMessages::Single(FrontendMessage::Raw(buf)));
} }
} }
@@ -390,7 +230,7 @@ impl Client {
client: self, client: self,
done: false, done: false,
}; };
self.batch_execute("BEGIN").await?; cleaner.client.batch_execute("BEGIN").await?;
cleaner.done = true; cleaner.done = true;
} }
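`RollbackIfNotDone` is a drop guard: if the `transaction()` future is dropped after `BEGIN` was queued but before it completed, `Drop` queues a compensating `ROLLBACK`. A reduced sketch of the shape, with the client's request channel stood in by a `Vec`:

```rust
struct RollbackIfNotDone<'a> {
    queue: &'a mut Vec<String>, // stand-in for the client's request channel
    done: bool,
}

impl Drop for RollbackIfNotDone<'_> {
    fn drop(&mut self) {
        if !self.done {
            // Fire-and-forget, mirroring the `let _ = ... send(...)` above.
            self.queue.push("ROLLBACK".to_string());
        }
    }
}

fn main() {
    let mut commands = Vec::new();
    {
        let guard = RollbackIfNotDone { queue: &mut commands, done: false };
        guard.queue.push("BEGIN".to_string());
        // Imagine the future being cancelled here: `done` is never set...
    }
    // ...so the guard queued the compensating command on drop.
    assert_eq!(commands, ["BEGIN", "ROLLBACK"]);
}
```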
@@ -416,11 +256,6 @@ impl Client {
} }
} }
/// Query for type information
pub async fn get_type(&self, oid: Oid) -> Result<Type, Error> {
crate::prepare::get_type(&self.inner, oid).await
}
/// Determines if the connection to the server has already closed. /// Determines if the connection to the server has already closed.
/// ///
/// In that case, all future queries will fail. /// In that case, all future queries will fail.

View File

@@ -1,4 +1,4 @@
use crate::query::RowStream; use crate::query::{self, RowStream};
use crate::types::Type; use crate::types::Type;
use crate::{Client, Error, Transaction}; use crate::{Client, Error, Transaction};
use async_trait::async_trait; use async_trait::async_trait;
@@ -13,33 +13,32 @@ mod private {
/// This trait is "sealed", and cannot be implemented outside of this crate. /// This trait is "sealed", and cannot be implemented outside of this crate.
#[async_trait] #[async_trait]
pub trait GenericClient: private::Sealed { pub trait GenericClient: private::Sealed {
/// Like `Client::query_raw_txt`. async fn query_raw_txt<S, I>(&mut self, statement: &str, params: I) -> Result<RowStream, Error>
async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
where where
S: AsRef<str> + Sync + Send, S: AsRef<str> + Sync + Send,
I: IntoIterator<Item = Option<S>> + Sync + Send, I: IntoIterator<Item = Option<S>> + Sync + Send,
I::IntoIter: ExactSizeIterator + Sync + Send; I::IntoIter: ExactSizeIterator + Sync + Send;
/// Query for type information /// Query for type information
async fn get_type(&self, oid: Oid) -> Result<Type, Error>; async fn get_type(&mut self, oid: Oid) -> Result<Type, Error>;
} }
impl private::Sealed for Client {} impl private::Sealed for Client {}
#[async_trait] #[async_trait]
impl GenericClient for Client { impl GenericClient for Client {
async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error> async fn query_raw_txt<S, I>(&mut self, statement: &str, params: I) -> Result<RowStream, Error>
where where
S: AsRef<str> + Sync + Send, S: AsRef<str> + Sync + Send,
I: IntoIterator<Item = Option<S>> + Sync + Send, I: IntoIterator<Item = Option<S>> + Sync + Send,
I::IntoIter: ExactSizeIterator + Sync + Send, I::IntoIter: ExactSizeIterator + Sync + Send,
{ {
self.query_raw_txt(statement, params).await query::query_txt(&mut self.inner, statement, params).await
} }
/// Query for type information /// Query for type information
async fn get_type(&self, oid: Oid) -> Result<Type, Error> { async fn get_type(&mut self, oid: Oid) -> Result<Type, Error> {
self.get_type(oid).await crate::prepare::get_type(&mut self.inner, &mut self.cached_typeinfo, oid).await
} }
} }
@@ -48,17 +47,18 @@ impl private::Sealed for Transaction<'_> {}
#[async_trait] #[async_trait]
#[allow(clippy::needless_lifetimes)] #[allow(clippy::needless_lifetimes)]
impl GenericClient for Transaction<'_> { impl GenericClient for Transaction<'_> {
async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error> async fn query_raw_txt<S, I>(&mut self, statement: &str, params: I) -> Result<RowStream, Error>
where where
S: AsRef<str> + Sync + Send, S: AsRef<str> + Sync + Send,
I: IntoIterator<Item = Option<S>> + Sync + Send, I: IntoIterator<Item = Option<S>> + Sync + Send,
I::IntoIter: ExactSizeIterator + Sync + Send, I::IntoIter: ExactSizeIterator + Sync + Send,
{ {
self.query_raw_txt(statement, params).await query::query_txt(&mut self.client().inner, statement, params).await
} }
/// Query for type information /// Query for type information
async fn get_type(&self, oid: Oid) -> Result<Type, Error> { async fn get_type(&mut self, oid: Oid) -> Result<Type, Error> {
self.client().get_type(oid).await let client = self.client();
crate::prepare::get_type(&mut client.inner, &mut client.cached_typeinfo, oid).await
} }
} }
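With the trait methods taking `&mut self`, generic callers must hold the client mutably across each call. A hypothetical caller sketch (the crate name `tokio_postgres2` is inferred from the package rename and is not part of this diff):

```rust
use tokio_postgres2::{Error, GenericClient};

// Works for both Client and Transaction<'_>, since both implement the trait.
async fn ping<C: GenericClient>(client: &mut C) -> Result<(), Error> {
    let _rows = client
        .query_raw_txt("SELECT 1", std::iter::empty::<Option<String>>())
        .await?;
    Ok(())
}
```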

View File

@@ -1,5 +1,5 @@
//! An asynchronous, pipelined, PostgreSQL client. //! An asynchronous, pipelined, PostgreSQL client.
#![warn(clippy::all)] #![warn(rust_2018_idioms, clippy::all)]
pub use crate::cancel_token::CancelToken; pub use crate::cancel_token::CancelToken;
pub use crate::client::{Client, SocketConfig}; pub use crate::client::{Client, SocketConfig};
@@ -10,11 +10,10 @@ use crate::error::DbError;
pub use crate::error::Error; pub use crate::error::Error;
pub use crate::generic_client::GenericClient; pub use crate::generic_client::GenericClient;
pub use crate::query::RowStream; pub use crate::query::RowStream;
pub use crate::row::{Row, SimpleQueryRow}; pub use crate::row::Row;
pub use crate::simple_query::SimpleQueryStream;
pub use crate::statement::{Column, Statement}; pub use crate::statement::{Column, Statement};
pub use crate::tls::NoTls; pub use crate::tls::NoTls;
pub use crate::to_statement::ToStatement; // pub use crate::to_statement::ToStatement;
pub use crate::transaction::Transaction; pub use crate::transaction::Transaction;
pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder}; pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder};
use crate::types::ToSql; use crate::types::ToSql;
@@ -65,7 +64,7 @@ pub mod row;
mod simple_query; mod simple_query;
mod statement; mod statement;
pub mod tls; pub mod tls;
mod to_statement; // mod to_statement;
mod transaction; mod transaction;
mod transaction_builder; mod transaction_builder;
pub mod types; pub mod types;
@@ -98,7 +97,6 @@ impl Notification {
/// An asynchronous message from the server. /// An asynchronous message from the server.
#[allow(clippy::large_enum_variant)] #[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
#[non_exhaustive]
pub enum AsyncMessage { pub enum AsyncMessage {
/// A notice. /// A notice.
/// ///
@@ -110,18 +108,6 @@ pub enum AsyncMessage {
Notification(Notification), Notification(Notification),
} }
/// Message returned by the `SimpleQuery` stream.
#[derive(Debug)]
#[non_exhaustive]
pub enum SimpleQueryMessage {
/// A row of data.
Row(SimpleQueryRow),
/// A statement in the query has completed.
///
/// The number of rows modified or selected is returned.
CommandComplete(u64),
}
fn slice_iter<'a>( fn slice_iter<'a>(
s: &'a [&'a (dyn ToSql + Sync)], s: &'a [&'a (dyn ToSql + Sync)],
) -> impl ExactSizeIterator<Item = &'a (dyn ToSql + Sync)> + 'a { ) -> impl ExactSizeIterator<Item = &'a (dyn ToSql + Sync)> + 'a {

View File

@@ -1,4 +1,4 @@
use crate::client::InnerClient; use crate::client::{CachedTypeInfo, InnerClient};
use crate::codec::FrontendMessage; use crate::codec::FrontendMessage;
use crate::connection::RequestMessages; use crate::connection::RequestMessages;
use crate::error::SqlState; use crate::error::SqlState;
@@ -7,14 +7,13 @@ use crate::{query, slice_iter};
use crate::{Column, Error, Statement}; use crate::{Column, Error, Statement};
use bytes::Bytes; use bytes::Bytes;
use fallible_iterator::FallibleIterator; use fallible_iterator::FallibleIterator;
use futures_util::{pin_mut, TryStreamExt}; use futures_util::{pin_mut, StreamExt, TryStreamExt};
use log::debug; use log::debug;
use postgres_protocol2::message::backend::Message; use postgres_protocol2::message::backend::Message;
use postgres_protocol2::message::frontend; use postgres_protocol2::message::frontend;
use std::future::Future; use std::future::Future;
use std::pin::Pin; use std::pin::{pin, Pin};
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
pub(crate) const TYPEINFO_QUERY: &str = "\ pub(crate) const TYPEINFO_QUERY: &str = "\
SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid
@@ -59,7 +58,8 @@ ORDER BY attnum
static NEXT_ID: AtomicUsize = AtomicUsize::new(0); static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
pub async fn prepare( pub async fn prepare(
client: &Arc<InnerClient>, client: &mut InnerClient,
cache: &mut CachedTypeInfo,
query: &str, query: &str,
types: &[Type], types: &[Type],
) -> Result<Statement, Error> { ) -> Result<Statement, Error> {
@@ -86,7 +86,7 @@ pub async fn prepare(
let mut parameters = vec![]; let mut parameters = vec![];
let mut it = parameter_description.parameters(); let mut it = parameter_description.parameters();
while let Some(oid) = it.next().map_err(Error::parse)? { while let Some(oid) = it.next().map_err(Error::parse)? {
let type_ = get_type(client, oid).await?; let type_ = get_type(client, cache, oid).await?;
parameters.push(type_); parameters.push(type_);
} }
@@ -94,24 +94,30 @@ pub async fn prepare(
if let Some(row_description) = row_description { if let Some(row_description) = row_description {
let mut it = row_description.fields(); let mut it = row_description.fields();
while let Some(field) = it.next().map_err(Error::parse)? { while let Some(field) = it.next().map_err(Error::parse)? {
let type_ = get_type(client, field.type_oid()).await?; let type_ = get_type(client, cache, field.type_oid()).await?;
let column = Column::new(field.name().to_string(), type_, field); let column = Column::new(field.name().to_string(), type_, field);
columns.push(column); columns.push(column);
} }
} }
Ok(Statement::new(client, name, parameters, columns)) Ok(Statement::new(name, parameters, columns))
} }
fn prepare_rec<'a>( fn prepare_rec<'a>(
client: &'a Arc<InnerClient>, client: &'a mut InnerClient,
cache: &'a mut CachedTypeInfo,
query: &'a str, query: &'a str,
types: &'a [Type], types: &'a [Type],
) -> Pin<Box<dyn Future<Output = Result<Statement, Error>> + 'a + Send>> { ) -> Pin<Box<dyn Future<Output = Result<Statement, Error>> + 'a + Send>> {
Box::pin(prepare(client, query, types)) Box::pin(prepare(client, cache, query, types))
} }
fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Result<Bytes, Error> { fn encode(
client: &mut InnerClient,
name: &str,
query: &str,
types: &[Type],
) -> Result<Bytes, Error> {
if types.is_empty() { if types.is_empty() {
debug!("preparing query {}: {}", name, query); debug!("preparing query {}: {}", name, query);
} else { } else {
@@ -126,16 +132,20 @@ fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Resu
}) })
} }
pub async fn get_type(client: &Arc<InnerClient>, oid: Oid) -> Result<Type, Error> { pub async fn get_type(
client: &mut InnerClient,
cache: &mut CachedTypeInfo,
oid: Oid,
) -> Result<Type, Error> {
if let Some(type_) = Type::from_oid(oid) { if let Some(type_) = Type::from_oid(oid) {
return Ok(type_); return Ok(type_);
} }
if let Some(type_) = client.type_(oid) { if let Some(type_) = cache.type_(oid) {
return Ok(type_); return Ok(type_);
} }
let stmt = typeinfo_statement(client).await?; let stmt = typeinfo_statement(client, cache).await?;
let rows = query::query(client, stmt, slice_iter(&[&oid])).await?; let rows = query::query(client, stmt, slice_iter(&[&oid])).await?;
pin_mut!(rows); pin_mut!(rows);
@@ -145,118 +155,141 @@ pub async fn get_type(client: &Arc<InnerClient>, oid: Oid) -> Result<Type, Error
None => return Err(Error::unexpected_message()), None => return Err(Error::unexpected_message()),
}; };
let name: String = row.try_get(0)?; let name: String = row.try_get(stmt.columns(), 0)?;
let type_: i8 = row.try_get(1)?; let type_: i8 = row.try_get(stmt.columns(), 1)?;
let elem_oid: Oid = row.try_get(2)?; let elem_oid: Oid = row.try_get(stmt.columns(), 2)?;
let rngsubtype: Option<Oid> = row.try_get(3)?; let rngsubtype: Option<Oid> = row.try_get(stmt.columns(), 3)?;
let basetype: Oid = row.try_get(4)?; let basetype: Oid = row.try_get(stmt.columns(), 4)?;
let schema: String = row.try_get(5)?; let schema: String = row.try_get(stmt.columns(), 5)?;
let relid: Oid = row.try_get(6)?; let relid: Oid = row.try_get(stmt.columns(), 6)?;
let kind = if type_ == b'e' as i8 { let kind = if type_ == b'e' as i8 {
let variants = get_enum_variants(client, oid).await?; let variants = get_enum_variants(client, cache, oid).await?;
Kind::Enum(variants) Kind::Enum(variants)
} else if type_ == b'p' as i8 { } else if type_ == b'p' as i8 {
Kind::Pseudo Kind::Pseudo
} else if basetype != 0 { } else if basetype != 0 {
let type_ = get_type_rec(client, basetype).await?; let type_ = get_type_rec(client, cache, basetype).await?;
Kind::Domain(type_) Kind::Domain(type_)
} else if elem_oid != 0 { } else if elem_oid != 0 {
let type_ = get_type_rec(client, elem_oid).await?; let type_ = get_type_rec(client, cache, elem_oid).await?;
Kind::Array(type_) Kind::Array(type_)
} else if relid != 0 { } else if relid != 0 {
let fields = get_composite_fields(client, relid).await?; let fields = get_composite_fields(client, cache, relid).await?;
Kind::Composite(fields) Kind::Composite(fields)
} else if let Some(rngsubtype) = rngsubtype { } else if let Some(rngsubtype) = rngsubtype {
let type_ = get_type_rec(client, rngsubtype).await?; let type_ = get_type_rec(client, cache, rngsubtype).await?;
Kind::Range(type_) Kind::Range(type_)
} else { } else {
Kind::Simple Kind::Simple
}; };
     let type_ = Type::new(name, oid, kind, schema);
-    client.set_type(oid, &type_);
+    cache.set_type(oid, &type_);
     Ok(type_)
 }

 fn get_type_rec<'a>(
-    client: &'a Arc<InnerClient>,
+    client: &'a mut InnerClient,
+    cache: &'a mut CachedTypeInfo,
     oid: Oid,
 ) -> Pin<Box<dyn Future<Output = Result<Type, Error>> + Send + 'a>> {
-    Box::pin(get_type(client, oid))
+    Box::pin(get_type(client, cache, oid))
 }

-async fn typeinfo_statement(client: &Arc<InnerClient>) -> Result<Statement, Error> {
-    if let Some(stmt) = client.typeinfo() {
-        return Ok(stmt);
+async fn typeinfo_statement<'c>(
+    client: &mut InnerClient,
+    cache: &'c mut CachedTypeInfo,
+) -> Result<&'c Statement, Error> {
+    if cache.typeinfo().is_some() {
+        // needed to get around a borrow checker limitation
+        return Ok(cache.typeinfo().unwrap());
     }

-    let stmt = match prepare_rec(client, TYPEINFO_QUERY, &[]).await {
+    let stmt = match prepare_rec(client, cache, TYPEINFO_QUERY, &[]).await {
         Ok(stmt) => stmt,
         Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => {
-            prepare_rec(client, TYPEINFO_FALLBACK_QUERY, &[]).await?
+            prepare_rec(client, cache, TYPEINFO_FALLBACK_QUERY, &[]).await?
         }
         Err(e) => return Err(e),
     };

-    client.set_typeinfo(&stmt);
-    Ok(stmt)
+    Ok(cache.set_typeinfo(stmt))
 }

-async fn get_enum_variants(client: &Arc<InnerClient>, oid: Oid) -> Result<Vec<String>, Error> {
-    let stmt = typeinfo_enum_statement(client).await?;
+async fn get_enum_variants(
+    client: &mut InnerClient,
+    cache: &mut CachedTypeInfo,
+    oid: Oid,
+) -> Result<Vec<String>, Error> {
+    let stmt = typeinfo_enum_statement(client, cache).await?;

-    query::query(client, stmt, slice_iter(&[&oid]))
-        .await?
-        .and_then(|row| async move { row.try_get(0) })
-        .try_collect()
-        .await
+    let mut out = vec![];
+    let mut rows = pin!(query::query(client, stmt, slice_iter(&[&oid])).await?);
+    while let Some(row) = rows.next().await {
+        out.push(row?.try_get(stmt.columns(), 0)?)
+    }
+    Ok(out)
 }

-async fn typeinfo_enum_statement(client: &Arc<InnerClient>) -> Result<Statement, Error> {
-    if let Some(stmt) = client.typeinfo_enum() {
-        return Ok(stmt);
+async fn typeinfo_enum_statement<'c>(
+    client: &mut InnerClient,
+    cache: &'c mut CachedTypeInfo,
+) -> Result<&'c Statement, Error> {
+    if cache.typeinfo_enum().is_some() {
+        // needed to get around a borrow checker limitation
+        return Ok(cache.typeinfo_enum().unwrap());
     }

-    let stmt = match prepare_rec(client, TYPEINFO_ENUM_QUERY, &[]).await {
+    let stmt = match prepare_rec(client, cache, TYPEINFO_ENUM_QUERY, &[]).await {
         Ok(stmt) => stmt,
         Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => {
-            prepare_rec(client, TYPEINFO_ENUM_FALLBACK_QUERY, &[]).await?
+            prepare_rec(client, cache, TYPEINFO_ENUM_FALLBACK_QUERY, &[]).await?
         }
         Err(e) => return Err(e),
     };

-    client.set_typeinfo_enum(&stmt);
-    Ok(stmt)
+    Ok(cache.set_typeinfo_enum(stmt))
 }

-async fn get_composite_fields(client: &Arc<InnerClient>, oid: Oid) -> Result<Vec<Field>, Error> {
-    let stmt = typeinfo_composite_statement(client).await?;
+async fn get_composite_fields(
+    client: &mut InnerClient,
+    cache: &mut CachedTypeInfo,
+    oid: Oid,
+) -> Result<Vec<Field>, Error> {
+    let stmt = typeinfo_composite_statement(client, cache).await?;

-    let rows = query::query(client, stmt, slice_iter(&[&oid]))
-        .await?
-        .try_collect::<Vec<_>>()
-        .await?;
+    let mut rows = pin!(query::query(client, stmt, slice_iter(&[&oid])).await?);
+    let mut oids = vec![];
+    while let Some(row) = rows.next().await {
+        let row = row?;
+        let name = row.try_get(stmt.columns(), 0)?;
+        let oid = row.try_get(stmt.columns(), 1)?;
+        oids.push((name, oid));
+    }

     let mut fields = vec![];
-    for row in rows {
-        let name = row.try_get(0)?;
-        let oid = row.try_get(1)?;
-        let type_ = get_type_rec(client, oid).await?;
+    for (name, oid) in oids {
+        let type_ = get_type_rec(client, cache, oid).await?;
         fields.push(Field::new(name, type_));
     }

     Ok(fields)
 }

-async fn typeinfo_composite_statement(client: &Arc<InnerClient>) -> Result<Statement, Error> {
-    if let Some(stmt) = client.typeinfo_composite() {
-        return Ok(stmt);
+async fn typeinfo_composite_statement<'c>(
+    client: &mut InnerClient,
+    cache: &'c mut CachedTypeInfo,
+) -> Result<&'c Statement, Error> {
+    if cache.typeinfo_composite().is_some() {
+        // needed to get around a borrow checker limitation
+        return Ok(cache.typeinfo_composite().unwrap());
     }

-    let stmt = prepare_rec(client, TYPEINFO_COMPOSITE_QUERY, &[]).await?;
-    client.set_typeinfo_composite(&stmt);
-    Ok(stmt)
+    let stmt = prepare_rec(client, cache, TYPEINFO_COMPOSITE_QUERY, &[]).await?;
+    Ok(cache.set_typeinfo_composite(stmt))
 }
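Aside: the repeated "needed to get around a borrow checker limitation" comments above all work around the same NLL limitation, where returning a reference obtained inside an `if let` extends the borrow over the rest of the function. A minimal, self-contained sketch of the pattern with hypothetical names (`Cache` stands in for `CachedTypeInfo`):

struct Cache(Option<String>);

impl Cache {
    fn get_or_insert(&mut self) -> &String {
        if self.0.is_some() {
            // Probing twice (is_some + unwrap) instead of returning the
            // binding from `if let Some(v) = &self.0` keeps the first
            // borrow from overlapping the insert path below.
            return self.0.as_ref().unwrap();
        }
        self.0 = Some(String::from("computed"));
        self.0.as_ref().unwrap()
    }
}

fn main() {
    let mut cache = Cache(None);
    assert_eq!(cache.get_or_insert(), "computed");
}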


@@ -14,7 +14,6 @@ use postgres_types2::{Format, ToSql, Type};
 use std::fmt;
 use std::marker::PhantomPinned;
 use std::pin::Pin;
-use std::sync::Arc;
 use std::task::{Context, Poll};

 struct BorrowToSqlParamsDebug<'a>(&'a [&'a (dyn ToSql + Sync)]);
@@ -26,10 +25,10 @@ impl fmt::Debug for BorrowToSqlParamsDebug<'_> {
 }

 pub async fn query<'a, I>(
-    client: &InnerClient,
-    statement: Statement,
+    client: &mut InnerClient,
+    statement: &Statement,
     params: I,
-) -> Result<RowStream, Error>
+) -> Result<RawRowStream, Error>
 where
     I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
     I::IntoIter: ExactSizeIterator,
@@ -41,13 +40,12 @@ where
             statement.name(),
             BorrowToSqlParamsDebug(params.as_slice()),
         );
-        encode(client, &statement, params)?
+        encode(client, statement, params)?
     } else {
-        encode(client, &statement, params)?
+        encode(client, statement, params)?
     };
     let responses = start(client, buf).await?;
-    Ok(RowStream {
-        statement,
+    Ok(RawRowStream {
         responses,
         command_tag: None,
         status: ReadyForQueryStatus::Unknown,
@@ -57,7 +55,7 @@ where
 }

 pub async fn query_txt<S, I>(
-    client: &Arc<InnerClient>,
+    client: &mut InnerClient,
     query: &str,
     params: I,
 ) -> Result<RowStream, Error>
@@ -157,49 +155,6 @@ where
     })
 }

-pub async fn execute<'a, I>(
-    client: &InnerClient,
-    statement: Statement,
-    params: I,
-) -> Result<u64, Error>
-where
-    I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
-    I::IntoIter: ExactSizeIterator,
-{
-    let buf = if log_enabled!(Level::Debug) {
-        let params = params.into_iter().collect::<Vec<_>>();
-        debug!(
-            "executing statement {} with parameters: {:?}",
-            statement.name(),
-            BorrowToSqlParamsDebug(params.as_slice()),
-        );
-        encode(client, &statement, params)?
-    } else {
-        encode(client, &statement, params)?
-    };
-    let mut responses = start(client, buf).await?;
-
-    let mut rows = 0;
-    loop {
-        match responses.next().await? {
-            Message::DataRow(_) => {}
-            Message::CommandComplete(body) => {
-                rows = body
-                    .tag()
-                    .map_err(Error::parse)?
-                    .rsplit(' ')
-                    .next()
-                    .unwrap()
-                    .parse()
-                    .unwrap_or(0);
-            }
-            Message::EmptyQueryResponse => rows = 0,
-            Message::ReadyForQuery(_) => return Ok(rows),
-            _ => return Err(Error::unexpected_message()),
-        }
-    }
-}
-
 async fn start(client: &InnerClient, buf: Bytes) -> Result<Responses, Error> {
     let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?;
@@ -211,7 +166,11 @@ async fn start(client: &InnerClient, buf: Bytes) -> Result<Responses, Error> {
     Ok(responses)
 }

-pub fn encode<'a, I>(client: &InnerClient, statement: &Statement, params: I) -> Result<Bytes, Error>
+pub fn encode<'a, I>(
+    client: &mut InnerClient,
+    statement: &Statement,
+    params: I,
+) -> Result<Bytes, Error>
 where
     I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
     I::IntoIter: ExactSizeIterator,
@@ -296,11 +255,7 @@ impl Stream for RowStream {
         loop {
             match ready!(this.responses.poll_next(cx)?) {
                 Message::DataRow(body) => {
-                    return Poll::Ready(Some(Ok(Row::new(
-                        this.statement.clone(),
-                        body,
-                        *this.output_format,
-                    )?)))
+                    return Poll::Ready(Some(Ok(Row::new(body, *this.output_format)?)))
                 }
                 Message::EmptyQueryResponse | Message::PortalSuspended => {}
                 Message::CommandComplete(body) => {
@@ -338,3 +293,41 @@ impl RowStream {
         self.status
     }
 }
+
+pin_project! {
+    /// A stream of table rows.
+    pub struct RawRowStream {
+        responses: Responses,
+        command_tag: Option<String>,
+        output_format: Format,
+        status: ReadyForQueryStatus,
+        #[pin]
+        _p: PhantomPinned,
+    }
+}
+
+impl Stream for RawRowStream {
+    type Item = Result<Row, Error>;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let this = self.project();
+        loop {
+            match ready!(this.responses.poll_next(cx)?) {
+                Message::DataRow(body) => {
+                    return Poll::Ready(Some(Ok(Row::new(body, *this.output_format)?)))
+                }
+                Message::EmptyQueryResponse | Message::PortalSuspended => {}
+                Message::CommandComplete(body) => {
+                    if let Ok(tag) = body.tag() {
+                        *this.command_tag = Some(tag.to_string());
+                    }
+                }
+                Message::ReadyForQuery(status) => {
+                    *this.status = status.into();
+                    return Poll::Ready(None);
+                }
+                _ => return Poll::Ready(Some(Err(Error::unexpected_message()))),
+            }
+        }
+    }
+}
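Since `RawRowStream` no longer owns the `Statement`, column metadata must come from the caller. A hedged sketch of the consumption pattern, mirroring `get_enum_variants` earlier in this diff (the crate-internal items `InnerClient`, `Statement`, `Oid`, `Error`, `query::query`, and `slice_iter` are assumed to be in scope):

use futures_util::StreamExt;
use std::pin::pin;

// Sketch only, not part of the diff: decode the first column of every row
// against the statement's column metadata.
async fn first_column_strings(
    client: &mut InnerClient,
    stmt: &Statement,
    oid: Oid,
) -> Result<Vec<String>, Error> {
    // The stream borrows nothing from the statement, so it is pinned
    // locally and each row is decoded via `stmt.columns()` explicitly.
    let mut rows = pin!(query::query(client, stmt, slice_iter(&[&oid])).await?);
    let mut out = Vec::new();
    while let Some(row) = rows.next().await {
        out.push(row?.try_get(stmt.columns(), 0)?);
    }
    Ok(out)
}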


@@ -1,103 +1,16 @@
 //! Rows.

-use crate::row::sealed::{AsName, Sealed};
-use crate::simple_query::SimpleColumn;
 use crate::statement::Column;
 use crate::types::{FromSql, Type, WrongType};
-use crate::{Error, Statement};
+use crate::Error;
 use fallible_iterator::FallibleIterator;
 use postgres_protocol2::message::backend::DataRowBody;
 use postgres_types2::{Format, WrongFormat};
 use std::fmt;
 use std::ops::Range;
 use std::str;
-use std::sync::Arc;
mod sealed {
pub trait Sealed {}
pub trait AsName {
fn as_name(&self) -> &str;
}
}
impl AsName for Column {
fn as_name(&self) -> &str {
self.name()
}
}
impl AsName for String {
fn as_name(&self) -> &str {
self
}
}
/// A trait implemented by types that can index into columns of a row.
///
/// This cannot be implemented outside of this crate.
pub trait RowIndex: Sealed {
#[doc(hidden)]
fn __idx<T>(&self, columns: &[T]) -> Option<usize>
where
T: AsName;
}
impl Sealed for usize {}
impl RowIndex for usize {
#[inline]
fn __idx<T>(&self, columns: &[T]) -> Option<usize>
where
T: AsName,
{
if *self >= columns.len() {
None
} else {
Some(*self)
}
}
}
impl Sealed for str {}
impl RowIndex for str {
#[inline]
fn __idx<T>(&self, columns: &[T]) -> Option<usize>
where
T: AsName,
{
if let Some(idx) = columns.iter().position(|d| d.as_name() == self) {
return Some(idx);
};
// FIXME ASCII-only case insensitivity isn't really the right thing to
// do. Postgres itself uses a dubious wrapper around tolower and JDBC
// uses the US locale.
columns
.iter()
.position(|d| d.as_name().eq_ignore_ascii_case(self))
}
}
impl<T> Sealed for &T where T: ?Sized + Sealed {}
impl<T> RowIndex for &T
where
T: ?Sized + RowIndex,
{
#[inline]
fn __idx<U>(&self, columns: &[U]) -> Option<usize>
where
U: AsName,
{
T::__idx(*self, columns)
}
}
 /// A row of data returned from the database by a query.
 pub struct Row {
-    statement: Statement,
     output_format: Format,
     body: DataRowBody,
     ranges: Vec<Option<Range<usize>>>,
@@ -105,80 +18,33 @@ pub struct Row {
 impl fmt::Debug for Row {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("Row")
-            .field("columns", &self.columns())
-            .finish()
+        f.debug_struct("Row").finish()
     }
 }

 impl Row {
     pub(crate) fn new(
-        statement: Statement,
+        // statement: Statement,
         body: DataRowBody,
         output_format: Format,
     ) -> Result<Row, Error> {
         let ranges = body.ranges().collect().map_err(Error::parse)?;
         Ok(Row {
-            statement,
             body,
             ranges,
             output_format,
         })
     }

-    /// Returns information about the columns of data in the row.
-    pub fn columns(&self) -> &[Column] {
-        self.statement.columns()
-    }
-
-    /// Determines if the row contains no values.
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    /// Returns the number of values in the row.
-    pub fn len(&self) -> usize {
-        self.columns().len()
-    }
-
-    /// Deserializes a value from the row.
-    ///
-    /// The value can be specified either by its numeric index in the row, or by its column name.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the index is out of bounds or if the value cannot be converted to the specified type.
-    pub fn get<'a, I, T>(&'a self, idx: I) -> T
+    pub(crate) fn try_get<'a, T>(&'a self, columns: &[Column], idx: usize) -> Result<T, Error>
     where
-        I: RowIndex + fmt::Display,
         T: FromSql<'a>,
     {
-        match self.get_inner(&idx) {
-            Ok(ok) => ok,
-            Err(err) => panic!("error retrieving column {}: {}", idx, err),
-        }
-    }
-
-    /// Like `Row::get`, but returns a `Result` rather than panicking.
-    pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result<T, Error>
-    where
-        I: RowIndex + fmt::Display,
-        T: FromSql<'a>,
-    {
-        self.get_inner(&idx)
-    }
-
-    fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result<T, Error>
-    where
-        I: RowIndex + fmt::Display,
-        T: FromSql<'a>,
-    {
-        let idx = match idx.__idx(self.columns()) {
-            Some(idx) => idx,
-            None => return Err(Error::column(idx.to_string())),
-        };
+        let Some(column) = columns.get(idx) else {
+            return Err(Error::column(idx.to_string()));
+        };

-        let ty = self.columns()[idx].type_();
+        let ty = column.type_();
         if !T::accepts(ty) {
             return Err(Error::from_sql(
                 Box::new(WrongType::new::<T>(ty.clone())),
@@ -216,85 +82,3 @@ impl Row {
         self.body.buffer().len()
     }
 }
impl AsName for SimpleColumn {
fn as_name(&self) -> &str {
self.name()
}
}
/// A row of data returned from the database by a simple query.
#[derive(Debug)]
pub struct SimpleQueryRow {
columns: Arc<[SimpleColumn]>,
body: DataRowBody,
ranges: Vec<Option<Range<usize>>>,
}
impl SimpleQueryRow {
#[allow(clippy::new_ret_no_self)]
pub(crate) fn new(
columns: Arc<[SimpleColumn]>,
body: DataRowBody,
) -> Result<SimpleQueryRow, Error> {
let ranges = body.ranges().collect().map_err(Error::parse)?;
Ok(SimpleQueryRow {
columns,
body,
ranges,
})
}
/// Returns information about the columns of data in the row.
pub fn columns(&self) -> &[SimpleColumn] {
&self.columns
}
/// Determines if the row contains no values.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of values in the row.
pub fn len(&self) -> usize {
self.columns.len()
}
/// Returns a value from the row.
///
/// The value can be specified either by its numeric index in the row, or by its column name.
///
/// # Panics
///
/// Panics if the index is out of bounds or if the value cannot be converted to the specified type.
pub fn get<I>(&self, idx: I) -> Option<&str>
where
I: RowIndex + fmt::Display,
{
match self.get_inner(&idx) {
Ok(ok) => ok,
Err(err) => panic!("error retrieving column {}: {}", idx, err),
}
}
/// Like `SimpleQueryRow::get`, but returns a `Result` rather than panicking.
pub fn try_get<I>(&self, idx: I) -> Result<Option<&str>, Error>
where
I: RowIndex + fmt::Display,
{
self.get_inner(&idx)
}
fn get_inner<I>(&self, idx: &I) -> Result<Option<&str>, Error>
where
I: RowIndex + fmt::Display,
{
let idx = match idx.__idx(&self.columns) {
Some(idx) => idx,
None => return Err(Error::column(idx.to_string())),
};
let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]);
FromSql::from_sql_nullable(&Type::TEXT, buf).map_err(|e| Error::from_sql(e, idx))
}
}
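With the `RowIndex` machinery gone, by-name lookup becomes the caller's job. A minimal sketch of an equivalent helper (hypothetical, not part of the diff; it only relies on the crate's `Column::name()` and mirrors the removed ASCII case-insensitive fallback):

// Resolve a column index by name, preferring an exact match.
fn column_index(columns: &[Column], name: &str) -> Option<usize> {
    columns
        .iter()
        .position(|c| c.name() == name)
        // Fall back to ASCII case-insensitive matching, as the removed
        // `RowIndex` impl for `str` did.
        .or_else(|| {
            columns
                .iter()
                .position(|c| c.name().eq_ignore_ascii_case(name))
        })
}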


@@ -1,52 +1,14 @@
-use crate::client::{InnerClient, Responses};
+use crate::client::InnerClient;
 use crate::codec::FrontendMessage;
 use crate::connection::RequestMessages;
-use crate::{Error, ReadyForQueryStatus, SimpleQueryMessage, SimpleQueryRow};
+use crate::{Error, ReadyForQueryStatus};
 use bytes::Bytes;
-use fallible_iterator::FallibleIterator;
-use futures_util::{ready, Stream};
 use log::debug;
-use pin_project_lite::pin_project;
 use postgres_protocol2::message::backend::Message;
 use postgres_protocol2::message::frontend;
-use std::marker::PhantomPinned;
-use std::pin::Pin;
-use std::sync::Arc;
-use std::task::{Context, Poll};
/// Information about a column of a single query row.
#[derive(Debug)]
pub struct SimpleColumn {
name: String,
}
impl SimpleColumn {
pub(crate) fn new(name: String) -> SimpleColumn {
SimpleColumn { name }
}
/// Returns the name of the column.
pub fn name(&self) -> &str {
&self.name
}
}
pub async fn simple_query(client: &InnerClient, query: &str) -> Result<SimpleQueryStream, Error> {
debug!("executing simple query: {}", query);
let buf = encode(client, query)?;
let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?;
Ok(SimpleQueryStream {
responses,
columns: None,
status: ReadyForQueryStatus::Unknown,
_p: PhantomPinned,
})
}
 pub async fn batch_execute(
-    client: &InnerClient,
+    client: &mut InnerClient,
     query: &str,
 ) -> Result<ReadyForQueryStatus, Error> {
     debug!("executing statement batch: {}", query);
@@ -66,77 +28,9 @@ pub async fn batch_execute(
     }
 }

-pub(crate) fn encode(client: &InnerClient, query: &str) -> Result<Bytes, Error> {
+pub(crate) fn encode(client: &mut InnerClient, query: &str) -> Result<Bytes, Error> {
     client.with_buf(|buf| {
         frontend::query(query, buf).map_err(Error::encode)?;
         Ok(buf.split().freeze())
     })
 }
pin_project! {
/// A stream of simple query results.
pub struct SimpleQueryStream {
responses: Responses,
columns: Option<Arc<[SimpleColumn]>>,
status: ReadyForQueryStatus,
#[pin]
_p: PhantomPinned,
}
}
impl SimpleQueryStream {
/// Returns if the connection is ready for querying, with the status of the connection.
///
/// This might be available only after the stream has been exhausted.
pub fn ready_status(&self) -> ReadyForQueryStatus {
self.status
}
}
impl Stream for SimpleQueryStream {
type Item = Result<SimpleQueryMessage, Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.project();
loop {
match ready!(this.responses.poll_next(cx)?) {
Message::CommandComplete(body) => {
let rows = body
.tag()
.map_err(Error::parse)?
.rsplit(' ')
.next()
.unwrap()
.parse()
.unwrap_or(0);
return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows))));
}
Message::EmptyQueryResponse => {
return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0))));
}
Message::RowDescription(body) => {
let columns = body
.fields()
.map(|f| Ok(SimpleColumn::new(f.name().to_string())))
.collect::<Vec<_>>()
.map_err(Error::parse)?
.into();
*this.columns = Some(columns);
}
Message::DataRow(body) => {
let row = match &this.columns {
Some(columns) => SimpleQueryRow::new(columns.clone(), body)?,
None => return Poll::Ready(Some(Err(Error::unexpected_message()))),
};
return Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row))));
}
Message::ReadyForQuery(s) => {
*this.status = s.into();
return Poll::Ready(None);
}
_ => return Poll::Ready(Some(Err(Error::unexpected_message()))),
}
}
}
}
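Both the removed `execute` earlier in this diff and the `SimpleQueryStream` above derive the affected-row count from the trailing token of the Postgres command tag. A self-contained sketch of that parse:

// Command tags look like "INSERT 0 5" or "UPDATE 3"; the row count is the
// last space-separated token, with 0 as the fallback for tags like "BEGIN".
fn rows_from_tag(tag: &str) -> u64 {
    tag.rsplit(' ')
        .next()
        .unwrap_or("")
        .parse()
        .unwrap_or(0)
}

fn main() {
    assert_eq!(rows_from_tag("INSERT 0 5"), 5);
    assert_eq!(rows_from_tag("UPDATE 3"), 3);
    assert_eq!(rows_from_tag("BEGIN"), 0);
}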


@@ -1,64 +1,33 @@
-use crate::client::InnerClient;
-use crate::codec::FrontendMessage;
-use crate::connection::RequestMessages;
 use crate::types::Type;
-use postgres_protocol2::{
-    message::{backend::Field, frontend},
-    Oid,
-};
-use std::{
-    fmt,
-    sync::{Arc, Weak},
-};
+use postgres_protocol2::{message::backend::Field, Oid};
+use std::fmt;

 struct StatementInner {
-    client: Weak<InnerClient>,
     name: String,
     params: Vec<Type>,
     columns: Vec<Column>,
 }

-impl Drop for StatementInner {
-    fn drop(&mut self) {
-        if let Some(client) = self.client.upgrade() {
-            let buf = client.with_buf(|buf| {
-                frontend::close(b'S', &self.name, buf).unwrap();
-                frontend::sync(buf);
-                buf.split().freeze()
-            });
-            let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)));
-        }
-    }
-}
-
 /// A prepared statement.
 ///
 /// Prepared statements can only be used with the connection that created them.
-#[derive(Clone)]
-pub struct Statement(Arc<StatementInner>);
+pub struct Statement(StatementInner);

 impl Statement {
-    pub(crate) fn new(
-        inner: &Arc<InnerClient>,
-        name: String,
-        params: Vec<Type>,
-        columns: Vec<Column>,
-    ) -> Statement {
-        Statement(Arc::new(StatementInner {
-            client: Arc::downgrade(inner),
+    pub(crate) fn new(name: String, params: Vec<Type>, columns: Vec<Column>) -> Statement {
+        Statement(StatementInner {
             name,
             params,
             columns,
-        }))
+        })
     }

     pub(crate) fn new_anonymous(params: Vec<Type>, columns: Vec<Column>) -> Statement {
-        Statement(Arc::new(StatementInner {
-            client: Weak::new(),
+        Statement(StatementInner {
             name: String::new(),
             params,
             columns,
-        }))
+        })
     }

     pub(crate) fn name(&self) -> &str {
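Note that with the `Weak<InnerClient>` handle and the `Drop` impl gone, dropping a `Statement` no longer issues a protocol-level Close. A hedged sketch of what an explicit close could look like, reusing exactly the calls from the removed `Drop` impl (this helper is hypothetical, not part of the diff):

// Send a Close('S') + Sync for a named statement, fire-and-forget,
// as the removed Drop impl did.
fn close_statement(client: &mut InnerClient, stmt: &Statement) {
    let buf = client.with_buf(|buf| {
        frontend::close(b'S', stmt.name(), buf).unwrap();
        frontend::sync(buf);
        buf.split().freeze()
    });
    let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)));
}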


@@ -1,57 +0,0 @@
use crate::to_statement::private::{Sealed, ToStatementType};
use crate::Statement;
mod private {
use crate::{Client, Error, Statement};
pub trait Sealed {}
pub enum ToStatementType<'a> {
Statement(&'a Statement),
Query(&'a str),
}
impl ToStatementType<'_> {
pub async fn into_statement(self, client: &Client) -> Result<Statement, Error> {
match self {
ToStatementType::Statement(s) => Ok(s.clone()),
ToStatementType::Query(s) => client.prepare(s).await,
}
}
}
}
/// A trait abstracting over prepared and unprepared statements.
///
/// Many methods are generic over this bound, so that they support both a raw query string as well as a statement which
/// was prepared previously.
///
/// This trait is "sealed" and cannot be implemented by anything outside this crate.
pub trait ToStatement: Sealed {
#[doc(hidden)]
fn __convert(&self) -> ToStatementType<'_>;
}
impl ToStatement for Statement {
fn __convert(&self) -> ToStatementType<'_> {
ToStatementType::Statement(self)
}
}
impl Sealed for Statement {}
impl ToStatement for str {
fn __convert(&self) -> ToStatementType<'_> {
ToStatementType::Query(self)
}
}
impl Sealed for str {}
impl ToStatement for String {
fn __convert(&self) -> ToStatementType<'_> {
ToStatementType::Query(self)
}
}
impl Sealed for String {}


@@ -1,6 +1,5 @@
 use crate::codec::FrontendMessage;
 use crate::connection::RequestMessages;
-use crate::query::RowStream;
 use crate::{CancelToken, Client, Error, ReadyForQueryStatus};
 use postgres_protocol2::message::frontend;
@@ -19,13 +18,13 @@ impl Drop for Transaction<'_> {
             return;
         }

-        let buf = self.client.inner().with_buf(|buf| {
+        let buf = self.client.inner.with_buf(|buf| {
             frontend::query("ROLLBACK", buf).unwrap();
             buf.split().freeze()
         });
         let _ = self
             .client
-            .inner()
+            .inner
             .send(RequestMessages::Single(FrontendMessage::Raw(buf)));
     }
 }
@@ -52,23 +51,13 @@ impl<'a> Transaction<'a> {
         self.client.batch_execute("ROLLBACK").await
     }

-    /// Like `Client::query_raw_txt`.
-    pub async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
-    where
-        S: AsRef<str>,
-        I: IntoIterator<Item = Option<S>>,
-        I::IntoIter: ExactSizeIterator,
-    {
-        self.client.query_raw_txt(statement, params).await
-    }
-
     /// Like `Client::cancel_token`.
     pub fn cancel_token(&self) -> CancelToken {
         self.client.cancel_token()
     }

     /// Returns a reference to the underlying `Client`.
-    pub fn client(&self) -> &Client {
+    pub fn client(&mut self) -> &mut Client {
         self.client
     }
 }


@@ -18,7 +18,6 @@ camino = { workspace = true, features = ["serde1"] }
 humantime-serde.workspace = true
 hyper = { workspace = true, features = ["client"] }
 futures.workspace = true
-reqwest.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 tokio = { workspace = true, features = ["sync", "fs", "io-util"] }


@@ -13,12 +13,10 @@ use std::time::Duration;
use std::time::SystemTime; use std::time::SystemTime;
 use super::REMOTE_STORAGE_PREFIX_SEPARATOR;
-use anyhow::Context;
 use anyhow::Result;
 use azure_core::request_options::{IfMatchCondition, MaxResults, Metadata, Range};
-use azure_core::HttpClient;
-use azure_core::TransportOptions;
 use azure_core::{Continuable, RetryOptions};
+use azure_identity::DefaultAzureCredential;
 use azure_storage::StorageCredentials;
 use azure_storage_blobs::blob::CopyStatus;
 use azure_storage_blobs::prelude::ClientBuilder;
@@ -78,18 +76,12 @@ impl AzureBlobStorage {
         let credentials = if let Ok(access_key) = env::var("AZURE_STORAGE_ACCESS_KEY") {
             StorageCredentials::access_key(account.clone(), access_key)
         } else {
-            let token_credential = azure_identity::create_default_credential()
-                .context("trying to obtain Azure default credentials")?;
-            StorageCredentials::token_credential(token_credential)
+            let token_credential = DefaultAzureCredential::default();
+            StorageCredentials::token_credential(Arc::new(token_credential))
         };

-        let builder = ClientBuilder::new(account, credentials)
-            // we have an outer retry
-            .retry(RetryOptions::none())
-            // Customize transport to configure conneciton pooling
-            .transport(TransportOptions::new(Self::reqwest_client(
-                azure_config.conn_pool_size,
-            )));
+        // we have an outer retry
+        let builder = ClientBuilder::new(account, credentials).retry(RetryOptions::none());

         let client = builder.container_client(azure_config.container_name.to_owned());
@@ -114,14 +106,6 @@ impl AzureBlobStorage {
}) })
} }
fn reqwest_client(conn_pool_size: usize) -> Arc<dyn HttpClient> {
let client = reqwest::ClientBuilder::new()
.pool_max_idle_per_host(conn_pool_size)
.build()
.expect("failed to build `reqwest` client");
Arc::new(client)
}
pub fn relative_path_to_name(&self, path: &RemotePath) -> String { pub fn relative_path_to_name(&self, path: &RemotePath) -> String {
assert_eq!(std::path::MAIN_SEPARATOR, REMOTE_STORAGE_PREFIX_SEPARATOR); assert_eq!(std::path::MAIN_SEPARATOR, REMOTE_STORAGE_PREFIX_SEPARATOR);
let path_string = path.get_path().as_str(); let path_string = path.get_path().as_str();
@@ -560,9 +544,9 @@ impl RemoteStorage for AzureBlobStorage {
.await .await
} }
-    async fn delete_objects(
+    async fn delete_objects<'a>(
         &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
         cancel: &CancellationToken,
     ) -> anyhow::Result<()> {
let kind = RequestKind::Delete; let kind = RequestKind::Delete;
@@ -640,10 +624,6 @@ impl RemoteStorage for AzureBlobStorage {
res res
} }
fn max_keys_per_delete(&self) -> usize {
super::MAX_KEYS_PER_DELETE_AZURE
}
async fn copy( async fn copy(
&self, &self,
from: &RemotePath, from: &RemotePath,


@@ -114,16 +114,6 @@ fn default_max_keys_per_list_response() -> Option<i32> {
DEFAULT_MAX_KEYS_PER_LIST_RESPONSE DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
} }
fn default_azure_conn_pool_size() -> usize {
// Conservative default: no connection pooling. At time of writing this is the Azure
// SDK's default as well, due to historic reports of hard-to-reproduce issues
// (https://github.com/hyperium/hyper/issues/2312)
//
// However, using connection pooling is important to avoid exhausting client ports when
// doing huge numbers of requests (https://github.com/neondatabase/cloud/issues/20971)
0
}
impl Debug for S3Config { impl Debug for S3Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("S3Config") f.debug_struct("S3Config")
@@ -156,8 +146,6 @@ pub struct AzureConfig {
     pub concurrency_limit: NonZeroUsize,
     #[serde(default = "default_max_keys_per_list_response")]
     pub max_keys_per_list_response: Option<i32>,
-    #[serde(default = "default_azure_conn_pool_size")]
-    pub conn_pool_size: usize,
 }
fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize { fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize {
@@ -314,7 +302,6 @@ timeout = '5s'";
 container_region = 'westeurope'
 upload_storage_class = 'INTELLIGENT_TIERING'
 timeout = '7s'
-conn_pool_size = 8
 ";
let config = parse(toml).unwrap(); let config = parse(toml).unwrap();
@@ -329,7 +316,6 @@ timeout = '5s'";
                 prefix_in_container: None,
                 concurrency_limit: default_remote_storage_azure_concurrency_limit(),
                 max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE,
-                conn_pool_size: 8,
             }),
timeout: Duration::from_secs(7), timeout: Duration::from_secs(7),
small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT


@@ -70,14 +70,7 @@ pub const DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT: usize = 100;
 pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;

 /// As defined in S3 docs
-///
-/// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html>
-pub const MAX_KEYS_PER_DELETE_S3: usize = 1000;
-
-/// As defined in Azure docs
-///
-/// <https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch>
-pub const MAX_KEYS_PER_DELETE_AZURE: usize = 256;
+pub const MAX_KEYS_PER_DELETE: usize = 1000;

 const REMOTE_STORAGE_PREFIX_SEPARATOR: char = '/';
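The constant exists because S3's DeleteObjects API caps each request at 1000 keys, so larger batches must be split. A self-contained sketch of the chunking this enables (the request itself is stubbed out with a print):

const MAX_KEYS_PER_DELETE: usize = 1000;

fn delete_in_batches(keys: &[String]) {
    // 2500 keys yield three requests of 1000, 1000 and 500 keys.
    for chunk in keys.chunks(MAX_KEYS_PER_DELETE) {
        println!("issuing DeleteObjects for {} keys", chunk.len());
    }
}

fn main() {
    let keys: Vec<String> = (0..2500).map(|i| format!("key-{i}")).collect();
    delete_in_batches(&keys);
}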
@@ -341,20 +334,12 @@ pub trait RemoteStorage: Send + Sync + 'static {
/// If the operation fails because of timeout or cancellation, the root cause of the error will be /// If the operation fails because of timeout or cancellation, the root cause of the error will be
/// set to `TimeoutOrCancel`. In such situation it is unknown which deletions, if any, went /// set to `TimeoutOrCancel`. In such situation it is unknown which deletions, if any, went
/// through. /// through.
-    async fn delete_objects(
+    async fn delete_objects<'a>(
         &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
         cancel: &CancellationToken,
     ) -> anyhow::Result<()>;
/// Returns the maximum number of keys that a call to [`Self::delete_objects`] can delete without chunking
///
/// The value returned is only an optimization hint, One can pass larger number of objects to
/// `delete_objects` as well.
///
/// The value is guaranteed to be >= 1.
fn max_keys_per_delete(&self) -> usize;
/// Deletes all objects matching the given prefix. /// Deletes all objects matching the given prefix.
/// ///
/// NB: this uses NoDelimiter and will match partial prefixes. For example, the prefix /a/b will /// NB: this uses NoDelimiter and will match partial prefixes. For example, the prefix /a/b will
@@ -548,16 +533,6 @@ impl<Other: RemoteStorage> GenericRemoteStorage<Arc<Other>> {
} }
} }
/// [`RemoteStorage::max_keys_per_delete`]
pub fn max_keys_per_delete(&self) -> usize {
match self {
Self::LocalFs(s) => s.max_keys_per_delete(),
Self::AwsS3(s) => s.max_keys_per_delete(),
Self::AzureBlob(s) => s.max_keys_per_delete(),
Self::Unreliable(s) => s.max_keys_per_delete(),
}
}
/// See [`RemoteStorage::delete_prefix`] /// See [`RemoteStorage::delete_prefix`]
pub async fn delete_prefix( pub async fn delete_prefix(
&self, &self,


@@ -562,9 +562,9 @@ impl RemoteStorage for LocalFs {
} }
} }
-    async fn delete_objects(
+    async fn delete_objects<'a>(
         &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
         cancel: &CancellationToken,
     ) -> anyhow::Result<()> {
for path in paths { for path in paths {
@@ -573,10 +573,6 @@ impl RemoteStorage for LocalFs {
Ok(()) Ok(())
} }
fn max_keys_per_delete(&self) -> usize {
super::MAX_KEYS_PER_DELETE_S3
}
async fn copy( async fn copy(
&self, &self,
from: &RemotePath, from: &RemotePath,


@@ -48,7 +48,7 @@ use crate::{
     metrics::{start_counting_cancelled_wait, start_measuring_requests},
     support::PermitCarrying,
     ConcurrencyLimiter, Download, DownloadError, DownloadOpts, Listing, ListingMode, ListingObject,
-    RemotePath, RemoteStorage, TimeTravelError, TimeoutOrCancel, MAX_KEYS_PER_DELETE_S3,
+    RemotePath, RemoteStorage, TimeTravelError, TimeoutOrCancel, MAX_KEYS_PER_DELETE,
     REMOTE_STORAGE_PREFIX_SEPARATOR,
 };
@@ -355,7 +355,7 @@ impl S3Bucket {
         let kind = RequestKind::Delete;
         let mut cancel = std::pin::pin!(cancel.cancelled());

-        for chunk in delete_objects.chunks(MAX_KEYS_PER_DELETE_S3) {
+        for chunk in delete_objects.chunks(MAX_KEYS_PER_DELETE) {
             let started_at = start_measuring_requests(kind);

             let req = self
@@ -813,9 +813,9 @@ impl RemoteStorage for S3Bucket {
.await .await
} }
-    async fn delete_objects(
+    async fn delete_objects<'a>(
         &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
         cancel: &CancellationToken,
     ) -> anyhow::Result<()> {
let kind = RequestKind::Delete; let kind = RequestKind::Delete;
@@ -832,10 +832,6 @@ impl RemoteStorage for S3Bucket {
self.delete_oids(&permit, &delete_objects, cancel).await self.delete_oids(&permit, &delete_objects, cancel).await
} }
fn max_keys_per_delete(&self) -> usize {
MAX_KEYS_PER_DELETE_S3
}
async fn delete(&self, path: &RemotePath, cancel: &CancellationToken) -> anyhow::Result<()> { async fn delete(&self, path: &RemotePath, cancel: &CancellationToken) -> anyhow::Result<()> {
let paths = std::array::from_ref(path); let paths = std::array::from_ref(path);
self.delete_objects(paths, cancel).await self.delete_objects(paths, cancel).await


@@ -181,9 +181,9 @@ impl RemoteStorage for UnreliableWrapper {
self.delete_inner(path, true, cancel).await self.delete_inner(path, true, cancel).await
} }
-    async fn delete_objects(
+    async fn delete_objects<'a>(
         &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
         cancel: &CancellationToken,
     ) -> anyhow::Result<()> {
self.attempt(RemoteOp::DeleteObjects(paths.to_vec()))?; self.attempt(RemoteOp::DeleteObjects(paths.to_vec()))?;
@@ -203,10 +203,6 @@ impl RemoteStorage for UnreliableWrapper {
Ok(()) Ok(())
} }
fn max_keys_per_delete(&self) -> usize {
self.inner.max_keys_per_delete()
}
async fn copy( async fn copy(
&self, &self,
from: &RemotePath, from: &RemotePath,


@@ -218,7 +218,6 @@ async fn create_azure_client(
         prefix_in_container: Some(format!("test_{millis}_{random:08x}/")),
         concurrency_limit: NonZeroUsize::new(100).unwrap(),
         max_keys_per_list_response,
-        conn_pool_size: 8,
     }),
timeout: RemoteStorageConfig::DEFAULT_TIMEOUT, timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT, small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT,


@@ -5,9 +5,6 @@ edition.workspace = true
 license.workspace = true

 [dependencies]
-const_format.workspace = true
 serde.workspace = true
-postgres_ffi.workspace = true
-pq_proto.workspace = true
-tokio.workspace = true
+const_format.workspace = true
 utils.workspace = true


@@ -1,27 +1,10 @@
 #![deny(unsafe_code)]
 #![deny(clippy::undocumented_unsafe_blocks)]

 use const_format::formatcp;
-use pq_proto::SystemId;
-use serde::{Deserialize, Serialize};

 /// Public API types
 pub mod models;
/// Consensus logical timestamp. Note: it is a part of sk control file.
pub type Term = u64;
pub const INVALID_TERM: Term = 0;
/// Information about Postgres. Safekeeper gets it once and then verifies all
/// further connections from computes match. Note: it is a part of sk control
/// file.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ServerInfo {
/// Postgres server version
pub pg_version: u32,
pub system_id: SystemId,
pub wal_seg_size: u32,
}
 pub const DEFAULT_PG_LISTEN_PORT: u16 = 5454;
 pub const DEFAULT_PG_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_PG_LISTEN_PORT}");


@@ -1,23 +1,10 @@
 //! Types used in safekeeper http API. Many of them are also reused internally.

-use postgres_ffi::TimestampTz;
 use serde::{Deserialize, Serialize};
-use std::net::SocketAddr;
-use tokio::time::Instant;
 use utils::{
-    id::{NodeId, TenantId, TenantTimelineId, TimelineId},
+    id::{NodeId, TenantId, TimelineId},
     lsn::Lsn,
-    pageserver_feedback::PageserverFeedback,
 };
-
-use crate::{ServerInfo, Term};
#[derive(Debug, Serialize)]
pub struct SafekeeperStatus {
pub id: NodeId,
}
 #[derive(Serialize, Deserialize)]
 pub struct TimelineCreateRequest {
     pub tenant_id: TenantId,
@@ -31,161 +18,6 @@ pub struct TimelineCreateRequest {
     pub local_start_lsn: Option<Lsn>,
 }
/// Same as TermLsn, but serializes LSN using display serializer
/// in Postgres format, i.e. 0/FFFFFFFF. Used only for the API response.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct TermSwitchApiEntry {
pub term: Term,
pub lsn: Lsn,
}
/// Augment AcceptorState with last_log_term for convenience
#[derive(Debug, Serialize, Deserialize)]
pub struct AcceptorStateStatus {
pub term: Term,
pub epoch: Term, // aka last_log_term, old `epoch` name is left for compatibility
pub term_history: Vec<TermSwitchApiEntry>,
}
/// Things safekeeper should know about timeline state on peers.
/// Used as both model and internally.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerInfo {
pub sk_id: NodeId,
pub term: Term,
/// Term of the last entry.
pub last_log_term: Term,
/// LSN of the last record.
pub flush_lsn: Lsn,
pub commit_lsn: Lsn,
/// Since which LSN safekeeper has WAL.
pub local_start_lsn: Lsn,
/// When info was received. Serde annotations are not very useful but make
/// the code compile -- we don't rely on this field externally.
#[serde(skip)]
#[serde(default = "Instant::now")]
pub ts: Instant,
pub pg_connstr: String,
pub http_connstr: String,
}
pub type FullTransactionId = u64;
/// Hot standby feedback received from replica
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct HotStandbyFeedback {
pub ts: TimestampTz,
pub xmin: FullTransactionId,
pub catalog_xmin: FullTransactionId,
}
pub const INVALID_FULL_TRANSACTION_ID: FullTransactionId = 0;
impl HotStandbyFeedback {
pub fn empty() -> HotStandbyFeedback {
HotStandbyFeedback {
ts: 0,
xmin: 0,
catalog_xmin: 0,
}
}
}
/// Standby status update
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct StandbyReply {
pub write_lsn: Lsn, // The location of the last WAL byte + 1 received and written to disk in the standby.
pub flush_lsn: Lsn, // The location of the last WAL byte + 1 flushed to disk in the standby.
pub apply_lsn: Lsn, // The location of the last WAL byte + 1 applied in the standby.
pub reply_ts: TimestampTz, // The client's system clock at the time of transmission, as microseconds since midnight on 2000-01-01.
pub reply_requested: bool,
}
impl StandbyReply {
pub fn empty() -> Self {
StandbyReply {
write_lsn: Lsn::INVALID,
flush_lsn: Lsn::INVALID,
apply_lsn: Lsn::INVALID,
reply_ts: 0,
reply_requested: false,
}
}
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct StandbyFeedback {
pub reply: StandbyReply,
pub hs_feedback: HotStandbyFeedback,
}
impl StandbyFeedback {
pub fn empty() -> Self {
StandbyFeedback {
reply: StandbyReply::empty(),
hs_feedback: HotStandbyFeedback::empty(),
}
}
}
/// Receiver is either pageserver or regular standby, which have different
/// feedbacks.
/// Used as both model and internally.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum ReplicationFeedback {
Pageserver(PageserverFeedback),
Standby(StandbyFeedback),
}
/// Uniquely identifies a WAL service connection. Logged in spans for
/// observability.
pub type ConnectionId = u32;
/// Serialize is used only for json'ing in API response. Also used internally.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WalSenderState {
pub ttid: TenantTimelineId,
pub addr: SocketAddr,
pub conn_id: ConnectionId,
// postgres application_name
pub appname: Option<String>,
pub feedback: ReplicationFeedback,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WalReceiverState {
/// None means it is recovery initiated by us (this safekeeper).
pub conn_id: Option<ConnectionId>,
pub status: WalReceiverStatus,
}
/// Walreceiver status. Currently only whether it passed voting stage and
/// started receiving the stream, but it is easy to add more if needed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum WalReceiverStatus {
Voting,
Streaming,
}
/// Info about timeline on safekeeper ready for reporting.
#[derive(Debug, Serialize, Deserialize)]
pub struct TimelineStatus {
pub tenant_id: TenantId,
pub timeline_id: TimelineId,
pub acceptor_state: AcceptorStateStatus,
pub pg_info: ServerInfo,
pub flush_lsn: Lsn,
pub timeline_start_lsn: Lsn,
pub local_start_lsn: Lsn,
pub commit_lsn: Lsn,
pub backup_lsn: Lsn,
pub peer_horizon_lsn: Lsn,
pub remote_consistent_lsn: Lsn,
pub peers: Vec<PeerInfo>,
pub walsenders: Vec<WalSenderState>,
pub walreceivers: Vec<WalReceiverState>,
}
 fn lsn_invalid() -> Lsn {
     Lsn::INVALID
 }


@@ -15,20 +15,17 @@ arc-swap.workspace = true
 sentry.workspace = true
 async-compression.workspace = true
 anyhow.workspace = true
-backtrace.workspace = true
 bincode.workspace = true
 bytes.workspace = true
 camino.workspace = true
 chrono.workspace = true
 diatomic-waker.workspace = true
-flate2.workspace = true
 git-version.workspace = true
 hex = { workspace = true, features = ["serde"] }
 humantime.workspace = true
 hyper0 = { workspace = true, features = ["full"] }
-itertools.workspace = true
 fail.workspace = true
-futures = { workspace = true }
+futures = { workspace = true}
 jemalloc_pprof.workspace = true
 jsonwebtoken.workspace = true
 nix.workspace = true


@@ -1,22 +1,15 @@
 use crate::auth::{AuthError, Claims, SwappableJwtAuth};
 use crate::http::error::{api_error_handler, route_error_handler, ApiError};
 use crate::http::request::{get_query_param, parse_query_param};
-use crate::pprof;
-use ::pprof::protos::Message as _;
-use ::pprof::ProfilerGuardBuilder;
 use anyhow::{anyhow, Context};
-use bytes::{Bytes, BytesMut};
 use hyper::header::{HeaderName, AUTHORIZATION, CONTENT_DISPOSITION};
 use hyper::http::HeaderValue;
 use hyper::Method;
 use hyper::{header::CONTENT_TYPE, Body, Request, Response};
 use metrics::{register_int_counter, Encoder, IntCounter, TextEncoder};
 use once_cell::sync::Lazy;
-use regex::Regex;
 use routerify::ext::RequestExt;
 use routerify::{Middleware, RequestInfo, Router, RouterBuilder};
-use tokio::sync::{mpsc, Mutex};
-use tokio_stream::wrappers::ReceiverStream;
 use tokio_util::io::ReaderStream;
 use tracing::{debug, info, info_span, warn, Instrument};
@@ -25,6 +18,11 @@ use std::io::Write as _;
 use std::str::FromStr;
 use std::time::Duration;

+use bytes::{Bytes, BytesMut};
+use pprof::protos::Message as _;
+use tokio::sync::{mpsc, Mutex};
+use tokio_stream::wrappers::ReceiverStream;
+
 static SERVE_METRICS_COUNT: Lazy<IntCounter> = Lazy::new(|| {
     register_int_counter!(
         "libmetrics_metric_handler_requests_total",
@@ -367,7 +365,7 @@ pub async fn profile_cpu_handler(req: Request<Body>) -> Result<Response<Body>, A
     // Take the profile.
     let report = tokio::task::spawn_blocking(move || {
-        let guard = ProfilerGuardBuilder::default()
+        let guard = pprof::ProfilerGuardBuilder::default()
             .frequency(frequency_hz)
             .blocklist(&["libc", "libgcc", "pthread", "vdso"])
             .build()?;
@@ -459,34 +457,10 @@ pub async fn profile_heap_handler(req: Request<Body>) -> Result<Response<Body>,
} }
         Format::Pprof => {
-            let data = tokio::task::spawn_blocking(move || {
-                let bytes = prof_ctl.dump_pprof()?;
-                // Symbolize the profile.
-                // TODO: consider moving this upstream to jemalloc_pprof and avoiding the
-                // serialization roundtrip.
-                static STRIP_FUNCTIONS: Lazy<Vec<(Regex, bool)>> = Lazy::new(|| {
-                    // Functions to strip from profiles. If true, also remove child frames.
-                    vec![
-                        (Regex::new("^__rust").unwrap(), false),
-                        (Regex::new("^_start$").unwrap(), false),
-                        (Regex::new("^irallocx_prof").unwrap(), true),
-                        (Regex::new("^prof_alloc_prep").unwrap(), true),
-                        (Regex::new("^std::rt::lang_start").unwrap(), false),
-                        (Regex::new("^std::sys::backtrace::__rust").unwrap(), false),
-                    ]
-                });
-                let profile = pprof::decode(&bytes)?;
-                let profile = pprof::symbolize(profile)?;
-                let profile = pprof::strip_locations(
-                    profile,
-                    &["libc", "libgcc", "pthread", "vdso"],
-                    &STRIP_FUNCTIONS,
-                );
-                pprof::encode(&profile)
-            })
-            .await
-            .map_err(|join_err| ApiError::InternalServerError(join_err.into()))?
-            .map_err(ApiError::InternalServerError)?;
+            let data = tokio::task::spawn_blocking(move || prof_ctl.dump_pprof())
+                .await
+                .map_err(|join_err| ApiError::InternalServerError(join_err.into()))?
+                .map_err(ApiError::InternalServerError)?;

             Response::builder()
                 .status(200)
                 .header(CONTENT_TYPE, "application/octet-stream")


@@ -94,10 +94,6 @@ pub mod toml_edit_ext;
 pub mod circuit_breaker;

-pub mod try_rcu;
-
-pub mod pprof;

 // Re-export used in macro. Avoids adding git-version as dep in target crates.
 #[doc(hidden)]
 pub use git_version;


@@ -1,190 +0,0 @@
use flate2::write::{GzDecoder, GzEncoder};
use flate2::Compression;
use itertools::Itertools as _;
use once_cell::sync::Lazy;
use pprof::protos::{Function, Line, Message as _, Profile};
use regex::Regex;
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::ffi::c_void;
use std::io::Write as _;
/// Decodes a gzip-compressed Protobuf-encoded pprof profile.
pub fn decode(bytes: &[u8]) -> anyhow::Result<Profile> {
let mut gz = GzDecoder::new(Vec::new());
gz.write_all(bytes)?;
Ok(Profile::parse_from_bytes(&gz.finish()?)?)
}
/// Encodes a pprof profile as gzip-compressed Protobuf.
pub fn encode(profile: &Profile) -> anyhow::Result<Vec<u8>> {
let mut gz = GzEncoder::new(Vec::new(), Compression::default());
profile.write_to_writer(&mut gz)?;
Ok(gz.finish()?)
}
/// Symbolizes a pprof profile using the current binary.
pub fn symbolize(mut profile: Profile) -> anyhow::Result<Profile> {
if !profile.function.is_empty() {
return Ok(profile); // already symbolized
}
// Collect function names.
let mut functions: HashMap<String, Function> = HashMap::new();
let mut strings: HashMap<String, i64> = profile
.string_table
.into_iter()
.enumerate()
.map(|(i, s)| (s, i as i64))
.collect();
// Helper to look up or register a string.
let mut string_id = |s: &str| -> i64 {
// Don't use .entry() to avoid unnecessary allocations.
if let Some(id) = strings.get(s) {
return *id;
}
let id = strings.len() as i64;
strings.insert(s.to_string(), id);
id
};
for loc in &mut profile.location {
if !loc.line.is_empty() {
continue;
}
// Resolve the line and function for each location.
backtrace::resolve(loc.address as *mut c_void, |symbol| {
let Some(symname) = symbol.name() else {
return;
};
let mut name = symname.to_string();
// Strip the Rust monomorphization suffix from the symbol name.
static SUFFIX_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new("::h[0-9a-f]{16}$").expect("invalid regex"));
if let Some(m) = SUFFIX_REGEX.find(&name) {
name.truncate(m.start());
}
let function_id = match functions.get(&name) {
Some(function) => function.id,
None => {
let id = functions.len() as u64 + 1;
let system_name = String::from_utf8_lossy(symname.as_bytes());
let filename = symbol
.filename()
.map(|path| path.to_string_lossy())
.unwrap_or(Cow::Borrowed(""));
let function = Function {
id,
name: string_id(&name),
system_name: string_id(&system_name),
filename: string_id(&filename),
..Default::default()
};
functions.insert(name, function);
id
}
};
loc.line.push(Line {
function_id,
line: symbol.lineno().unwrap_or(0) as i64,
..Default::default()
});
});
}
// Store the resolved functions, and mark the mapping as resolved.
profile.function = functions.into_values().sorted_by_key(|f| f.id).collect();
profile.string_table = strings
.into_iter()
.sorted_by_key(|(_, i)| *i)
.map(|(s, _)| s)
.collect();
for mapping in &mut profile.mapping {
mapping.has_functions = true;
mapping.has_filenames = true;
}
Ok(profile)
}
/// Strips locations (stack frames) matching the given mappings (substring) or function names
/// (regex). The function bool specifies whether child frames should be stripped as well.
///
/// The string definitions are left behind in the profile for simplicity, to avoid rewriting all
/// string references.
pub fn strip_locations(
mut profile: Profile,
mappings: &[&str],
functions: &[(Regex, bool)],
) -> Profile {
// Strip mappings.
let mut strip_mappings: HashSet<u64> = HashSet::new();
profile.mapping.retain(|mapping| {
let Some(name) = profile.string_table.get(mapping.filename as usize) else {
return true;
};
if mappings.iter().any(|substr| name.contains(substr)) {
strip_mappings.insert(mapping.id);
return false;
}
true
});
// Strip functions.
let mut strip_functions: HashMap<u64, bool> = HashMap::new();
profile.function.retain(|function| {
let Some(name) = profile.string_table.get(function.name as usize) else {
return true;
};
for (regex, strip_children) in functions {
if regex.is_match(name) {
strip_functions.insert(function.id, *strip_children);
return false;
}
}
true
});
// Strip locations. The bool specifies whether child frames should be stripped too.
let mut strip_locations: HashMap<u64, bool> = HashMap::new();
profile.location.retain(|location| {
for line in &location.line {
if let Some(strip_children) = strip_functions.get(&line.function_id) {
strip_locations.insert(location.id, *strip_children);
return false;
}
}
if strip_mappings.contains(&location.mapping_id) {
strip_locations.insert(location.id, false);
return false;
}
true
});
// Strip sample locations.
for sample in &mut profile.sample {
// First, find the uppermost function with child removal and truncate the stack.
if let Some(truncate) = sample
.location_id
.iter()
.rposition(|id| strip_locations.get(id) == Some(&true))
{
sample.location_id.drain(..=truncate);
}
// Next, strip any individual frames without child removal.
sample
.location_id
.retain(|id| !strip_locations.contains_key(id));
}
profile
}


@@ -1,77 +0,0 @@
//! Try RCU extension lifted from <https://github.com/vorner/arc-swap/issues/94#issuecomment-1987154023>
pub trait ArcSwapExt<T> {
/// [`ArcSwap::rcu`](arc_swap::ArcSwap::rcu), but with Result that short-circuits on error.
fn try_rcu<R, F, E>(&self, f: F) -> Result<T, E>
where
F: FnMut(&T) -> Result<R, E>,
R: Into<T>;
}
impl<T, S> ArcSwapExt<T> for arc_swap::ArcSwapAny<T, S>
where
T: arc_swap::RefCnt,
S: arc_swap::strategy::CaS<T>,
{
fn try_rcu<R, F, E>(&self, mut f: F) -> Result<T, E>
where
F: FnMut(&T) -> Result<R, E>,
R: Into<T>,
{
fn ptr_eq<Base, A, B>(a: A, b: B) -> bool
where
A: arc_swap::AsRaw<Base>,
B: arc_swap::AsRaw<Base>,
{
let a = a.as_raw();
let b = b.as_raw();
std::ptr::eq(a, b)
}
let mut cur = self.load();
loop {
let new = f(&cur)?.into();
let prev = self.compare_and_swap(&*cur, new);
let swapped = ptr_eq(&*cur, &*prev);
if swapped {
return Ok(arc_swap::Guard::into_inner(prev));
} else {
cur = prev;
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use arc_swap::ArcSwap;
use std::sync::Arc;
#[test]
fn test_try_rcu_success() {
let swap = ArcSwap::from(Arc::new(42));
let result = swap.try_rcu(|value| -> Result<_, String> { Ok(**value + 1) });
assert!(result.is_ok());
assert_eq!(**swap.load(), 43);
}
#[test]
fn test_try_rcu_error() {
let swap = ArcSwap::from(Arc::new(42));
let result = swap.try_rcu(|value| -> Result<i32, _> {
if **value == 42 {
Err("err")
} else {
Ok(**value + 1)
}
});
assert!(result.is_err());
assert_eq!(result.unwrap_err(), "err");
assert_eq!(**swap.load(), 42);
}
}


@@ -37,7 +37,7 @@ message ValueMeta {
 }

 message CompactKey {
-    uint64 high = 1;
-    uint64 low = 2;
+    int64 high = 1;
+    int64 low = 2;
 }


@@ -236,8 +236,8 @@ impl From<ValueMeta> for proto::ValueMeta {
 impl From<CompactKey> for proto::CompactKey {
     fn from(value: CompactKey) -> Self {
         proto::CompactKey {
-            high: (value.raw() >> 64) as u64,
-            low: value.raw() as u64,
+            high: (value.raw() >> 64) as i64,
+            low: value.raw() as i64,
         }
     }
 }
@@ -354,64 +354,3 @@ impl From<proto::CompactKey> for CompactKey {
         (((value.high as i128) << 64) | (value.low as i128)).into()
     }
} }
#[test]
fn test_compact_key_with_large_relnode() {
use pageserver_api::key::Key;
let inputs = vec![
Key {
field1: 0,
field2: 0x100,
field3: 0x200,
field4: 0,
field5: 0x10,
field6: 0x5,
},
Key {
field1: 0,
field2: 0x100,
field3: 0x200,
field4: 0x007FFFFF,
field5: 0x10,
field6: 0x5,
},
Key {
field1: 0,
field2: 0x100,
field3: 0x200,
field4: 0x00800000,
field5: 0x10,
field6: 0x5,
},
Key {
field1: 0,
field2: 0x100,
field3: 0x200,
field4: 0x00800001,
field5: 0x10,
field6: 0x5,
},
Key {
field1: 0,
field2: 0xFFFFFFFF,
field3: 0xFFFFFFFF,
field4: 0xFFFFFFFF,
field5: 0x0,
field6: 0x0,
},
];
for input in inputs {
assert!(input.is_valid_key_on_write_path());
let compact = input.to_compact();
let proto: proto::CompactKey = compact.into();
let from_proto: CompactKey = proto.into();
assert_eq!(
compact, from_proto,
"Round trip failed for key with relnode={:#x}",
input.field4
);
}
}
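The deleted test above guarded the subtle part of this encoding: splitting an i128 key into two 64-bit halves and joining them again. A self-contained sketch of the round trip with unsigned halves, where zero-extension makes the join a plain shift-and-or (with signed halves, as in the proto change above, the low word sign-extends on widening and would need masking):

fn split(raw: i128) -> (u64, u64) {
    ((raw >> 64) as u64, raw as u64)
}

fn join(high: u64, low: u64) -> i128 {
    // u64 -> i128 zero-extends, so no masking is required here.
    ((high as i128) << 64) | (low as i128)
}

fn main() {
    // Low half with its top bit set: the kind of case the removed test
    // exercised via large relnode values.
    let raw = (0x0000_0100_0000_0200_i128 << 64) | 0x8000_0001_0010_0005_i128;
    let (high, low) = split(raw);
    assert_eq!(join(high, low), raw);
}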


@@ -30,9 +30,9 @@ fn main() -> anyhow::Result<()> {
let pgxn_neon = std::fs::canonicalize(pgxn_neon)?; let pgxn_neon = std::fs::canonicalize(pgxn_neon)?;
let pgxn_neon = pgxn_neon.to_str().ok_or(anyhow!("Bad non-UTF path"))?; let pgxn_neon = pgxn_neon.to_str().ok_or(anyhow!("Bad non-UTF path"))?;
println!("cargo:rustc-link-lib=static=walproposer");
println!("cargo:rustc-link-lib=static=pgport"); println!("cargo:rustc-link-lib=static=pgport");
println!("cargo:rustc-link-lib=static=pgcommon"); println!("cargo:rustc-link-lib=static=pgcommon");
println!("cargo:rustc-link-lib=static=walproposer");
println!("cargo:rustc-link-search={walproposer_lib_search_str}"); println!("cargo:rustc-link-search={walproposer_lib_search_str}");
// Rebuild crate when libwalproposer.a changes // Rebuild crate when libwalproposer.a changes


@@ -270,18 +270,12 @@ impl Client {
Ok(body) Ok(body)
} }
-    pub async fn set_tenant_config(&self, req: &TenantConfigRequest) -> Result<()> {
+    pub async fn tenant_config(&self, req: &TenantConfigRequest) -> Result<()> {
         let uri = format!("{}/v1/tenant/config", self.mgmt_api_endpoint);
         self.request(Method::PUT, &uri, req).await?;
         Ok(())
     }
-
-    pub async fn patch_tenant_config(&self, req: &TenantConfigPatchRequest) -> Result<()> {
-        let uri = format!("{}/v1/tenant/config", self.mgmt_api_endpoint);
-        self.request(Method::PATCH, &uri, req).await?;
-        Ok(())
-    }
pub async fn tenant_secondary_download( pub async fn tenant_secondary_download(
&self, &self,
tenant_id: TenantShardId, tenant_id: TenantShardId,

View File

@@ -272,7 +272,7 @@ struct CompactionJob<E: CompactionJobExecutor> {
completed: bool, completed: bool,
} }
impl<E> LevelCompactionState<'_, E> impl<'a, E> LevelCompactionState<'a, E>
where where
E: CompactionJobExecutor, E: CompactionJobExecutor,
{ {
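This hunk, and several below, toggle between a named impl lifetime and the elided `'_` form. The two are equivalent whenever the body never refers to the lifetime by name; a minimal illustration with a made-up type:

```rust
struct Wrapper<'a>(&'a str);

// Same impl, elided: equivalent to `impl<'a> Wrapper<'a>` because the
// body never names 'a. Clippy's needless_lifetimes lint suggests this form.
impl Wrapper<'_> {
    fn len(&self) -> usize {
        self.0.len()
    }
}
```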

View File

@@ -224,8 +224,9 @@ impl<L> Level<L> {
} }
// recalculate depth if this was the last event at this point // recalculate depth if this was the last event at this point
let more_events_at_this_key = let more_events_at_this_key = events_iter
events_iter.peek().is_some_and(|next_e| next_e.key == e.key); .peek()
.map_or(false, |next_e| next_e.key == e.key);
if !more_events_at_this_key { if !more_events_at_this_key {
let mut active_depth = 0; let mut active_depth = 0;
for (_end_lsn, is_image, _idx) in active_set.iter().rev() { for (_end_lsn, is_image, _idx) in active_set.iter().rev() {
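The two spellings in this hunk are equivalent: `Option::is_some_and`, stable since Rust 1.70, expresses the same predicate as `map_or(false, ...)` more directly. A small self-contained check of the peek pattern used here:

```rust
fn main() {
    let mut events_iter = [1, 1, 2].iter().peekable();
    let e = *events_iter.next().unwrap();
    // Both forms ask: is there a next event, and does it share this key?
    let a = events_iter.peek().is_some_and(|next_e| **next_e == e);
    let b = events_iter.peek().map_or(false, |next_e| **next_e == e);
    assert_eq!(a, b);
    assert!(a);
}
```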

View File

@@ -148,7 +148,7 @@ pub trait CompactionDeltaLayer<E: CompactionJobExecutor + ?Sized>: CompactionLay
Self: 'a; Self: 'a;
/// Return all keys in this delta layer. /// Return all keys in this delta layer.
fn load_keys( fn load_keys<'a>(
&self, &self,
ctx: &E::RequestContext, ctx: &E::RequestContext,
) -> impl Future<Output = anyhow::Result<Vec<Self::DeltaEntry<'_>>>> + Send; ) -> impl Future<Output = anyhow::Result<Vec<Self::DeltaEntry<'_>>>> + Send;

View File

@@ -143,7 +143,7 @@ impl interface::CompactionLayer<Key> for Arc<MockDeltaLayer> {
impl interface::CompactionDeltaLayer<MockTimeline> for Arc<MockDeltaLayer> { impl interface::CompactionDeltaLayer<MockTimeline> for Arc<MockDeltaLayer> {
type DeltaEntry<'a> = MockRecord; type DeltaEntry<'a> = MockRecord;
async fn load_keys(&self, _ctx: &MockRequestContext) -> anyhow::Result<Vec<MockRecord>> { async fn load_keys<'a>(&self, _ctx: &MockRequestContext) -> anyhow::Result<Vec<MockRecord>> {
Ok(self.records.clone()) Ok(self.records.clone())
} }
} }

View File

@@ -64,7 +64,7 @@ async fn main_impl(args: Args) -> anyhow::Result<()> {
println!("operating on timeline {}", timeline); println!("operating on timeline {}", timeline);
mgmt_api_client mgmt_api_client
.set_tenant_config(&TenantConfigRequest { .tenant_config(&TenantConfigRequest {
tenant_id: timeline.tenant_id, tenant_id: timeline.tenant_id,
config: TenantConfig::default(), config: TenantConfig::default(),
}) })

View File

@@ -248,7 +248,7 @@ where
} }
} }
impl<W> Basebackup<'_, W> impl<'a, W> Basebackup<'a, W>
where where
W: AsyncWrite + Send + Sync + Unpin, W: AsyncWrite + Send + Sync + Unpin,
{ {

View File

@@ -9,6 +9,7 @@
use remote_storage::GenericRemoteStorage; use remote_storage::GenericRemoteStorage;
use remote_storage::RemotePath; use remote_storage::RemotePath;
use remote_storage::TimeoutOrCancel; use remote_storage::TimeoutOrCancel;
use remote_storage::MAX_KEYS_PER_DELETE;
use std::time::Duration; use std::time::Duration;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
use tracing::info; use tracing::info;
@@ -130,8 +131,7 @@ impl Deleter {
} }
pub(super) async fn background(&mut self) -> Result<(), DeletionQueueError> { pub(super) async fn background(&mut self) -> Result<(), DeletionQueueError> {
let max_keys_per_delete = self.remote_storage.max_keys_per_delete(); self.accumulator.reserve(MAX_KEYS_PER_DELETE);
self.accumulator.reserve(max_keys_per_delete);
loop { loop {
if self.cancel.is_cancelled() { if self.cancel.is_cancelled() {
@@ -156,14 +156,14 @@ impl Deleter {
match msg { match msg {
DeleterMessage::Delete(mut list) => { DeleterMessage::Delete(mut list) => {
while !list.is_empty() || self.accumulator.len() == max_keys_per_delete { while !list.is_empty() || self.accumulator.len() == MAX_KEYS_PER_DELETE {
if self.accumulator.len() == max_keys_per_delete { if self.accumulator.len() == MAX_KEYS_PER_DELETE {
self.flush().await?; self.flush().await?;
// If we have received this number of keys, proceed with attempting to execute // If we have received this number of keys, proceed with attempting to execute
assert_eq!(self.accumulator.len(), 0); assert_eq!(self.accumulator.len(), 0);
} }
let available_slots = max_keys_per_delete - self.accumulator.len(); let available_slots = MAX_KEYS_PER_DELETE - self.accumulator.len();
let take_count = std::cmp::min(available_slots, list.len()); let take_count = std::cmp::min(available_slots, list.len());
for path in list.drain(list.len() - take_count..) { for path in list.drain(list.len() - take_count..) {
self.accumulator.push(path); self.accumulator.push(path);
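For readers outside this file: the loop above implements a simple accumulate-and-flush batcher. A condensed, self-contained sketch of the same pattern, with the storage call stubbed out; the constant mirrors the diff, and 1000 matches S3's per-request `DeleteObjects` limit.

```rust
// Sketch: buffer keys and issue one bulk delete per full batch.
const MAX_KEYS_PER_DELETE: usize = 1000;

struct Deleter {
    accumulator: Vec<String>,
}

impl Deleter {
    /// Drain `list` into the accumulator, flushing whenever it fills up.
    fn enqueue(&mut self, mut list: Vec<String>) {
        while !list.is_empty() {
            if self.accumulator.len() == MAX_KEYS_PER_DELETE {
                self.flush();
            }
            let available = MAX_KEYS_PER_DELETE - self.accumulator.len();
            let take = available.min(list.len());
            // Drain from the tail so the remaining elements don't shift.
            self.accumulator.extend(list.drain(list.len() - take..));
        }
    }

    fn flush(&mut self) {
        // One bulk delete request for up to MAX_KEYS_PER_DELETE keys goes here.
        self.accumulator.clear();
    }
}
```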

View File

@@ -767,27 +767,7 @@ paths:
/v1/tenant/config: /v1/tenant/config:
put: put:
description: | description: |
Update tenant's config by setting it to the provided value Update tenant's config.
Invalid fields in the tenant config will cause the request to be rejected with status 400.
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/TenantConfigRequest"
responses:
"200":
description: OK
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/TenantInfo"
patch:
description: |
Update tenant's config additively by patching the updated fields provided.
Null values unset the field and non-null values upsert it.
Invalid fields in the tenant config will cause the request to be rejected with status 400. Invalid fields in the tenant config will cause the request to be rejected with status 400.
requestBody: requestBody:
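A hedged illustration of the PATCH semantics described above, where a null unsets a field and a non-null value upserts it; the tenant id and the two config fields are made up for the example.

```rust
// Sketch of a PATCH /v1/tenant/config body (field names illustrative).
let body = serde_json::json!({
    "tenant_id": "1f359dd625e519a1a4e8d7509690f6fc",
    "config": {
        "compaction_threshold": 10, // non-null: upsert this field
        "gc_period": null           // null: unset it, falling back to the default
    }
});
```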

View File

@@ -28,7 +28,6 @@ use pageserver_api::models::LsnLease;
use pageserver_api::models::LsnLeaseRequest; use pageserver_api::models::LsnLeaseRequest;
use pageserver_api::models::OffloadedTimelineInfo; use pageserver_api::models::OffloadedTimelineInfo;
use pageserver_api::models::ShardParameters; use pageserver_api::models::ShardParameters;
use pageserver_api::models::TenantConfigPatchRequest;
use pageserver_api::models::TenantDetails; use pageserver_api::models::TenantDetails;
use pageserver_api::models::TenantLocationConfigRequest; use pageserver_api::models::TenantLocationConfigRequest;
use pageserver_api::models::TenantLocationConfigResponse; use pageserver_api::models::TenantLocationConfigResponse;
@@ -1696,47 +1695,7 @@ async fn update_tenant_config_handler(
crate::tenant::Tenant::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf) crate::tenant::Tenant::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
.await .await
.map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?; .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
tenant.set_new_tenant_config(new_tenant_conf);
let _ = tenant
.update_tenant_config(|_crnt| Ok(new_tenant_conf.clone()))
.expect("Closure returns Ok()");
json_response(StatusCode::OK, ())
}
async fn patch_tenant_config_handler(
mut request: Request<Body>,
_cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
let request_data: TenantConfigPatchRequest = json_request(&mut request).await?;
let tenant_id = request_data.tenant_id;
check_permission(&request, Some(tenant_id))?;
let state = get_state(&request);
let tenant_shard_id = TenantShardId::unsharded(tenant_id);
let tenant = state
.tenant_manager
.get_attached_tenant_shard(tenant_shard_id)?;
tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
let updated = tenant
.update_tenant_config(|crnt| crnt.apply_patch(request_data.config.clone()))
.map_err(ApiError::BadRequest)?;
// This is a legacy API that only operates on attached tenants: the preferred
// API to use is the location_config/ endpoint, which lets the caller provide
// the full LocationConf.
let location_conf = LocationConf::attached_single(
updated,
tenant.get_generation(),
&ShardParameters::default(),
);
crate::tenant::Tenant::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
.await
.map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
json_response(StatusCode::OK, ()) json_response(StatusCode::OK, ())
} }
@@ -2077,30 +2036,15 @@ async fn timeline_compact_handler(
parse_query_param::<_, bool>(&request, "wait_until_scheduled_compaction_done")? parse_query_param::<_, bool>(&request, "wait_until_scheduled_compaction_done")?
.unwrap_or(false); .unwrap_or(false);
let sub_compaction = compact_request
.as_ref()
.map(|r| r.sub_compaction)
.unwrap_or(false);
let sub_compaction_max_job_size_mb = compact_request
.as_ref()
.and_then(|r| r.sub_compaction_max_job_size_mb);
let options = CompactOptions { let options = CompactOptions {
compact_key_range: compact_request compact_range: compact_request
.as_ref() .as_ref()
.and_then(|r| r.compact_key_range.clone()), .and_then(|r| r.compact_range.clone()),
compact_lsn_range: compact_request compact_below_lsn: compact_request.as_ref().and_then(|r| r.compact_below_lsn),
.as_ref()
.and_then(|r| r.compact_lsn_range.clone()),
flags, flags,
sub_compaction,
sub_compaction_max_job_size_mb,
}; };
let scheduled = compact_request let scheduled = compact_request.map(|r| r.scheduled).unwrap_or(false);
.as_ref()
.map(|r| r.scheduled)
.unwrap_or(false);
async { async {
let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
@@ -2109,7 +2053,7 @@ async fn timeline_compact_handler(
let tenant = state let tenant = state
.tenant_manager .tenant_manager
.get_attached_tenant_shard(tenant_shard_id)?; .get_attached_tenant_shard(tenant_shard_id)?;
let rx = tenant.schedule_compaction(timeline_id, options).await.map_err(ApiError::InternalServerError)?; let rx = tenant.schedule_compaction(timeline_id, options).await;
if wait_until_scheduled_compaction_done { if wait_until_scheduled_compaction_done {
// It is possible that this will take a long time, dropping the HTTP request will not cancel the compaction. // It is possible that this will take a long time, dropping the HTTP request will not cancel the compaction.
rx.await.ok(); rx.await.ok();
@@ -3336,9 +3280,6 @@ pub fn make_router(
.get("/v1/tenant/:tenant_shard_id/synthetic_size", |r| { .get("/v1/tenant/:tenant_shard_id/synthetic_size", |r| {
api_handler(r, tenant_size_handler) api_handler(r, tenant_size_handler)
}) })
.patch("/v1/tenant/config", |r| {
api_handler(r, patch_tenant_config_handler)
})
.put("/v1/tenant/config", |r| { .put("/v1/tenant/config", |r| {
api_handler(r, update_tenant_config_handler) api_handler(r, update_tenant_config_handler)
}) })
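Both config endpoints registered above funnel through the closure-based `update_tenant_config` visible in the handler hunk: PUT passes `|_crnt| Ok(new_tenant_conf.clone())`, PATCH passes `|crnt| crnt.apply_patch(...)`. A minimal sketch of that shape, with made-up types and a `Mutex` standing in for whatever synchronization the real tenant uses:

```rust
use std::sync::Mutex;

#[derive(Clone)]
struct TenantConf {
    compaction_threshold: usize,
}

struct Tenant {
    conf: Mutex<TenantConf>,
}

impl Tenant {
    // The caller derives the new config from the current one and may
    // reject it; rejection maps to a 400 in the HTTP handler.
    fn update_tenant_config(
        &self,
        update: impl FnOnce(&TenantConf) -> Result<TenantConf, String>,
    ) -> Result<TenantConf, String> {
        let mut guard = self.conf.lock().unwrap();
        let new = update(&guard)?;
        *guard = new.clone();
        Ok(new)
    }
}
```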

View File

@@ -3,7 +3,7 @@ use metrics::{
register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec, register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
register_int_counter, register_int_counter_pair_vec, register_int_counter_vec, register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec, register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair, Counter, CounterVec, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec, IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
}; };
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
@@ -16,6 +16,7 @@ use postgres_backend::{is_expected_io_error, QueryError};
use pq_proto::framed::ConnectionError; use pq_proto::framed::ConnectionError;
use strum::{EnumCount, VariantNames}; use strum::{EnumCount, VariantNames};
use strum_macros::{IntoStaticStr, VariantNames}; use strum_macros::{IntoStaticStr, VariantNames};
use tracing::warn;
use utils::id::TimelineId; use utils::id::TimelineId;
/// Prometheus histogram buckets (in seconds) for operations in the critical /// Prometheus histogram buckets (in seconds) for operations in the critical
@@ -445,6 +446,15 @@ pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
.expect("failed to define a metric") .expect("failed to define a metric")
}); });
static FLUSH_WAIT_UPLOAD_TIME: Lazy<GaugeVec> = Lazy::new(|| {
register_gauge_vec!(
"pageserver_flush_wait_upload_seconds",
"Time spent waiting for preceding uploads during layer flush",
&["tenant_id", "shard_id", "timeline_id"]
)
.expect("failed to define a metric")
});
static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| { static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
register_int_gauge_vec!( register_int_gauge_vec!(
"pageserver_last_record_lsn", "pageserver_last_record_lsn",
@@ -1213,163 +1223,54 @@ pub(crate) mod virtual_file_io_engine {
}); });
} }
pub(crate) struct SmgrOpTimer(Option<SmgrOpTimerInner>); pub(crate) struct SmgrOpTimer {
pub(crate) struct SmgrOpTimerInner { global_latency_histo: Histogram,
global_execution_latency_histo: Histogram,
per_timeline_execution_latency_histo: Option<Histogram>,
global_batch_wait_time: Histogram, // Optional because not all op types are tracked per-timeline
per_timeline_batch_wait_time: Histogram, per_timeline_latency_histo: Option<Histogram>,
global_flush_in_progress_micros: IntCounter, start: Instant,
per_timeline_flush_in_progress_micros: IntCounter, throttled: Duration,
op: SmgrQueryType,
timings: SmgrOpTimerState,
}
#[derive(Debug)]
enum SmgrOpTimerState {
Received {
received_at: Instant,
},
ThrottleDoneExecutionStarting {
received_at: Instant,
throttle_started_at: Instant,
started_execution_at: Instant,
},
}
pub(crate) struct SmgrOpFlushInProgress {
flush_started_at: Instant,
global_micros: IntCounter,
per_timeline_micros: IntCounter,
} }
impl SmgrOpTimer { impl SmgrOpTimer {
pub(crate) fn observe_throttle_done_execution_starting(&mut self, throttle: &ThrottleResult) { pub(crate) fn deduct_throttle(&mut self, throttle: &Option<Duration>) {
let inner = self.0.as_mut().expect("other public methods consume self"); let Some(throttle) = throttle else {
match (&mut inner.timings, throttle) { return;
(SmgrOpTimerState::Received { received_at }, throttle) => match throttle { };
ThrottleResult::NotThrottled { start } => { self.throttled += *throttle;
inner.timings = SmgrOpTimerState::ThrottleDoneExecutionStarting {
received_at: *received_at,
throttle_started_at: *start,
started_execution_at: *start,
};
}
ThrottleResult::Throttled { start, end } => {
inner.timings = SmgrOpTimerState::ThrottleDoneExecutionStarting {
received_at: *start,
throttle_started_at: *start,
started_execution_at: *end,
};
}
},
(x, _) => panic!("called in unexpected state: {x:?}"),
}
}
pub(crate) fn observe_smgr_op_completion_and_start_flushing(mut self) -> SmgrOpFlushInProgress {
let (flush_start, inner) = self
.smgr_op_end()
.expect("this method consume self, and the only other caller is drop handler");
let SmgrOpTimerInner {
global_flush_in_progress_micros,
per_timeline_flush_in_progress_micros,
..
} = inner;
SmgrOpFlushInProgress {
flush_started_at: flush_start,
global_micros: global_flush_in_progress_micros,
per_timeline_micros: per_timeline_flush_in_progress_micros,
}
}
/// Returns `None` if this method has already been called, `Some` otherwise.
fn smgr_op_end(&mut self) -> Option<(Instant, SmgrOpTimerInner)> {
let inner = self.0.take()?;
let now = Instant::now();
let batch;
let execution;
let throttle;
match inner.timings {
SmgrOpTimerState::Received { received_at } => {
batch = (now - received_at).as_secs_f64();
// TODO: use label for dropped requests.
// This is quite rare in practice, only during tenant/pageservers shutdown.
throttle = Duration::ZERO;
execution = Duration::ZERO.as_secs_f64();
}
SmgrOpTimerState::ThrottleDoneExecutionStarting {
received_at,
throttle_started_at,
started_execution_at,
} => {
batch = (throttle_started_at - received_at).as_secs_f64();
throttle = started_execution_at - throttle_started_at;
execution = (now - started_execution_at).as_secs_f64();
}
}
// update time spent in batching
inner.global_batch_wait_time.observe(batch);
inner.per_timeline_batch_wait_time.observe(batch);
// time spent in throttle metric is updated by throttle impl
let _ = throttle;
// update metrics for execution latency
inner.global_execution_latency_histo.observe(execution);
if let Some(per_timeline_execution_latency_histo) =
&inner.per_timeline_execution_latency_histo
{
per_timeline_execution_latency_histo.observe(execution);
}
Some((now, inner))
} }
} }
impl Drop for SmgrOpTimer { impl Drop for SmgrOpTimer {
fn drop(&mut self) { fn drop(&mut self) {
self.smgr_op_end(); let elapsed = self.start.elapsed();
}
}
impl SmgrOpFlushInProgress { let elapsed = match elapsed.checked_sub(self.throttled) {
pub(crate) async fn measure<Fut, O>(mut self, mut fut: Fut) -> O Some(elapsed) => elapsed,
where None => {
Fut: std::future::Future<Output = O>, use utils::rate_limit::RateLimit;
{ static LOGGED: Lazy<Mutex<enum_map::EnumMap<SmgrQueryType, RateLimit>>> =
let mut fut = std::pin::pin!(fut); Lazy::new(|| {
Mutex::new(enum_map::EnumMap::from_array(std::array::from_fn(|_| {
let now = Instant::now(); RateLimit::new(Duration::from_secs(10))
// Whenever observe_guard gets called, or dropped, })))
// it adds the time elapsed since its last call to metrics. });
// Last call is tracked in `now`. let mut guard = LOGGED.lock().unwrap();
let mut observe_guard = scopeguard::guard( let rate_limit = &mut guard[self.op];
|| { rate_limit.call(|| {
let elapsed = now - self.flush_started_at; warn!(op=?self.op, ?elapsed, ?self.throttled, "implementation error: time spent throttled exceeds total request wall clock time");
self.global_micros });
.inc_by(u64::try_from(elapsed.as_micros()).unwrap()); elapsed // un-throttled time, more info than just saturating to 0
self.per_timeline_micros
.inc_by(u64::try_from(elapsed.as_micros()).unwrap());
self.flush_started_at = now;
},
|mut observe| {
observe();
},
);
loop {
match tokio::time::timeout(Duration::from_secs(10), &mut fut).await {
Ok(v) => return v,
Err(_timeout) => {
(*observe_guard)();
}
} }
};
let elapsed = elapsed.as_secs_f64();
self.global_latency_histo.observe(elapsed);
if let Some(per_timeline_getpage_histo) = &self.per_timeline_latency_histo {
per_timeline_getpage_histo.observe(elapsed);
} }
} }
} }
@@ -1401,10 +1302,6 @@ pub(crate) struct SmgrQueryTimePerTimeline {
per_timeline_getpage_latency: Histogram, per_timeline_getpage_latency: Histogram,
global_batch_size: Histogram, global_batch_size: Histogram,
per_timeline_batch_size: Histogram, per_timeline_batch_size: Histogram,
global_flush_in_progress_micros: IntCounter,
per_timeline_flush_in_progress_micros: IntCounter,
global_batch_wait_time: Histogram,
per_timeline_batch_wait_time: Histogram,
} }
static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| { static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
@@ -1427,15 +1324,12 @@ static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|
.expect("failed to define a metric") .expect("failed to define a metric")
}); });
// Alias so all histograms recording per-timeline smgr timings use the same buckets.
static SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS: &[f64] = CRITICAL_OP_BUCKETS;
static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| { static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
register_histogram_vec!( register_histogram_vec!(
"pageserver_smgr_query_seconds", "pageserver_smgr_query_seconds",
"Time spent _executing_ smgr query handling, excluding batch and throttle delays.", "Time spent on smgr query handling, aggegated by query type and tenant/timeline.",
&["smgr_query_type", "tenant_id", "shard_id", "timeline_id"], &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(), CRITICAL_OP_BUCKETS.into(),
) )
.expect("failed to define a metric") .expect("failed to define a metric")
}); });
@@ -1493,7 +1387,7 @@ static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| { static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
register_histogram_vec!( register_histogram_vec!(
"pageserver_smgr_query_seconds_global", "pageserver_smgr_query_seconds_global",
"Like pageserver_smgr_query_seconds, but aggregated to instance level.", "Time spent on smgr query handling, aggregated by query type.",
&["smgr_query_type"], &["smgr_query_type"],
SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(), SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
) )
@@ -1570,45 +1464,6 @@ fn set_page_service_config_max_batch_size(conf: &PageServicePipeliningConfig) {
.set(value.try_into().unwrap()); .set(value.try_into().unwrap());
} }
static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
register_int_counter_vec!(
"pageserver_page_service_pagestream_flush_in_progress_micros",
"Counter that sums up the microseconds that a pagestream response was being flushed into the TCP connection. \
If the flush is particularly slow, this counter will be updated periodically to make slow flushes \
easily discoverable in monitoring. \
Hence, this is NOT a completion latency histogram.",
&["tenant_id", "shard_id", "timeline_id"],
)
.expect("failed to define a metric")
});
static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter!(
"pageserver_page_service_pagestream_flush_in_progress_micros_global",
"Like pageserver_page_service_pagestream_flush_in_progress_seconds, but instance-wide.",
)
.expect("failed to define a metric")
});
static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME: Lazy<HistogramVec> = Lazy::new(|| {
register_histogram_vec!(
"pageserver_page_service_pagestream_batch_wait_time_seconds",
"Time a request spent waiting in its batch until the batch moved to throttle&execution.",
&["tenant_id", "shard_id", "timeline_id"],
SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
)
.expect("failed to define a metric")
});
static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
register_histogram!(
"pageserver_page_service_pagestream_batch_wait_time_seconds_global",
"Like pageserver_page_service_pagestream_batch_wait_time_seconds, but aggregated to instance level.",
SMGR_QUERY_TIME_GLOBAL_BUCKETS.to_vec(),
)
.expect("failed to define a metric")
});
impl SmgrQueryTimePerTimeline { impl SmgrQueryTimePerTimeline {
pub(crate) fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self { pub(crate) fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
let tenant_id = tenant_shard_id.tenant_id.to_string(); let tenant_id = tenant_shard_id.tenant_id.to_string();
@@ -1649,17 +1504,6 @@ impl SmgrQueryTimePerTimeline {
.get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id]) .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
.unwrap(); .unwrap();
let global_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL.clone();
let per_timeline_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME
.get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
.unwrap();
let global_flush_in_progress_micros =
PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL.clone();
let per_timeline_flush_in_progress_micros = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS
.get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
.unwrap();
Self { Self {
global_started, global_started,
global_latency, global_latency,
@@ -1667,13 +1511,9 @@ impl SmgrQueryTimePerTimeline {
per_timeline_getpage_started, per_timeline_getpage_started,
global_batch_size, global_batch_size,
per_timeline_batch_size, per_timeline_batch_size,
global_flush_in_progress_micros,
per_timeline_flush_in_progress_micros,
global_batch_wait_time,
per_timeline_batch_wait_time,
} }
} }
pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, received_at: Instant) -> SmgrOpTimer { pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, started_at: Instant) -> SmgrOpTimer {
self.global_started[op as usize].inc(); self.global_started[op as usize].inc();
let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) { let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
@@ -1683,17 +1523,13 @@ impl SmgrQueryTimePerTimeline {
None None
}; };
SmgrOpTimer(Some(SmgrOpTimerInner { SmgrOpTimer {
global_execution_latency_histo: self.global_latency[op as usize].clone(), global_latency_histo: self.global_latency[op as usize].clone(),
per_timeline_execution_latency_histo: per_timeline_latency_histo, per_timeline_latency_histo,
timings: SmgrOpTimerState::Received { received_at }, start: started_at,
global_flush_in_progress_micros: self.global_flush_in_progress_micros.clone(), op,
per_timeline_flush_in_progress_micros: self throttled: Duration::ZERO,
.per_timeline_flush_in_progress_micros }
.clone(),
global_batch_wait_time: self.global_batch_wait_time.clone(),
per_timeline_batch_wait_time: self.per_timeline_batch_wait_time.clone(),
}))
} }
pub(crate) fn observe_getpage_batch_start(&self, batch_size: usize) { pub(crate) fn observe_getpage_batch_start(&self, batch_size: usize) {
@@ -2368,15 +2204,6 @@ pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| WalIngestMet
.expect("failed to define a metric"), .expect("failed to define a metric"),
}); });
pub(crate) static PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED: Lazy<IntCounterVec> = Lazy::new(|| {
register_int_counter_vec!(
"pageserver_timeline_wal_records_received",
"Number of WAL records received per shard",
&["tenant_id", "shard_id", "timeline_id"]
)
.expect("failed to define a metric")
});
pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| { pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
register_histogram!( register_histogram!(
"pageserver_wal_redo_seconds", "pageserver_wal_redo_seconds",
@@ -2577,6 +2404,7 @@ pub(crate) struct TimelineMetrics {
shard_id: String, shard_id: String,
timeline_id: String, timeline_id: String,
pub flush_time_histo: StorageTimeMetrics, pub flush_time_histo: StorageTimeMetrics,
pub flush_wait_upload_time_gauge: Gauge,
pub compact_time_histo: StorageTimeMetrics, pub compact_time_histo: StorageTimeMetrics,
pub create_images_time_histo: StorageTimeMetrics, pub create_images_time_histo: StorageTimeMetrics,
pub logical_size_histo: StorageTimeMetrics, pub logical_size_histo: StorageTimeMetrics,
@@ -2603,7 +2431,6 @@ pub(crate) struct TimelineMetrics {
pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>, pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
/// Number of valid LSN leases. /// Number of valid LSN leases.
pub valid_lsn_lease_count_gauge: UIntGauge, pub valid_lsn_lease_count_gauge: UIntGauge,
pub wal_records_received: IntCounter,
shutdown: std::sync::atomic::AtomicBool, shutdown: std::sync::atomic::AtomicBool,
} }
@@ -2622,6 +2449,9 @@ impl TimelineMetrics {
&shard_id, &shard_id,
&timeline_id, &timeline_id,
); );
let flush_wait_upload_time_gauge = FLUSH_WAIT_UPLOAD_TIME
.get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
.unwrap();
let compact_time_histo = StorageTimeMetrics::new( let compact_time_histo = StorageTimeMetrics::new(
StorageTimeOperation::Compact, StorageTimeOperation::Compact,
&tenant_id, &tenant_id,
@@ -2758,15 +2588,12 @@ impl TimelineMetrics {
.get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id]) .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
.unwrap(); .unwrap();
let wal_records_received = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED
.get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
.unwrap();
TimelineMetrics { TimelineMetrics {
tenant_id, tenant_id,
shard_id, shard_id,
timeline_id, timeline_id,
flush_time_histo, flush_time_histo,
flush_wait_upload_time_gauge,
compact_time_histo, compact_time_histo,
create_images_time_histo, create_images_time_histo,
logical_size_histo, logical_size_histo,
@@ -2793,7 +2620,6 @@ impl TimelineMetrics {
evictions_with_low_residence_duration, evictions_with_low_residence_duration,
), ),
valid_lsn_lease_count_gauge, valid_lsn_lease_count_gauge,
wal_records_received,
shutdown: std::sync::atomic::AtomicBool::default(), shutdown: std::sync::atomic::AtomicBool::default(),
} }
} }
@@ -2816,6 +2642,14 @@ impl TimelineMetrics {
self.resident_physical_size_gauge.get() self.resident_physical_size_gauge.get()
} }
pub(crate) fn flush_wait_upload_time_gauge_add(&self, duration: f64) {
self.flush_wait_upload_time_gauge.add(duration);
crate::metrics::FLUSH_WAIT_UPLOAD_TIME
.get_metric_with_label_values(&[&self.tenant_id, &self.shard_id, &self.timeline_id])
.unwrap()
.add(duration);
}
pub(crate) fn shutdown(&self) { pub(crate) fn shutdown(&self) {
let was_shutdown = self let was_shutdown = self
.shutdown .shutdown
@@ -2833,6 +2667,7 @@ impl TimelineMetrics {
let shard_id = &self.shard_id; let shard_id = &self.shard_id;
let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]); let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]); let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
let _ = FLUSH_WAIT_UPLOAD_TIME.remove_label_values(&[tenant_id, shard_id, timeline_id]);
let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]); let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
{ {
RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get()); RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
@@ -2922,21 +2757,6 @@ impl TimelineMetrics {
shard_id, shard_id,
timeline_id, timeline_id,
]); ]);
let _ = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED.remove_label_values(&[
tenant_id,
shard_id,
timeline_id,
]);
let _ = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS.remove_label_values(&[
tenant_id,
shard_id,
timeline_id,
]);
let _ = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME.remove_label_values(&[
tenant_id,
shard_id,
timeline_id,
]);
} }
} }
@@ -2967,7 +2787,6 @@ use crate::context::{PageContentKind, RequestContext};
use crate::task_mgr::TaskKind; use crate::task_mgr::TaskKind;
use crate::tenant::mgr::TenantSlot; use crate::tenant::mgr::TenantSlot;
use crate::tenant::tasks::BackgroundLoopKind; use crate::tenant::tasks::BackgroundLoopKind;
use crate::tenant::throttle::ThrottleResult;
use crate::tenant::Timeline; use crate::tenant::Timeline;
/// Maintain a per timeline gauge in addition to the global gauge. /// Maintain a per timeline gauge in addition to the global gauge.
@@ -3822,7 +3641,6 @@ pub fn preinitialize_metrics(conf: &'static PageServerConf) {
&REMOTE_ONDEMAND_DOWNLOADED_BYTES, &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
&CIRCUIT_BREAKERS_BROKEN, &CIRCUIT_BREAKERS_BROKEN,
&CIRCUIT_BREAKERS_UNBROKEN, &CIRCUIT_BREAKERS_UNBROKEN,
&PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL,
] ]
.into_iter() .into_iter()
.for_each(|c| { .for_each(|c| {
@@ -3870,7 +3688,6 @@ pub fn preinitialize_metrics(conf: &'static PageServerConf) {
&WAL_REDO_BYTES_HISTOGRAM, &WAL_REDO_BYTES_HISTOGRAM,
&WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM, &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
&PAGE_SERVICE_BATCH_SIZE_GLOBAL, &PAGE_SERVICE_BATCH_SIZE_GLOBAL,
&PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL,
] ]
.into_iter() .into_iter()
.for_each(|h| { .for_each(|h| {
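The gauge added in this file follows a recurring pattern: register a labeled vec once, resolve a per-timeline child for the hot path, and remove the label set on shutdown so stale series stop being exported. A sketch with an illustrative metric name, using the same `metrics` macros this file already imports:

```rust
use metrics::{register_gauge_vec, GaugeVec};
use once_cell::sync::Lazy;

// Hypothetical metric, same shape as pageserver_flush_wait_upload_seconds.
static EXAMPLE_WAIT_TIME: Lazy<GaugeVec> = Lazy::new(|| {
    register_gauge_vec!(
        "example_wait_seconds",
        "Illustrative per-timeline wait-time gauge",
        &["tenant_id", "shard_id", "timeline_id"]
    )
    .expect("failed to define a metric")
});

fn add_wait(tenant: &str, shard: &str, timeline: &str, secs: f64) {
    EXAMPLE_WAIT_TIME
        .get_metric_with_label_values(&[tenant, shard, timeline])
        .unwrap()
        .add(secs);
}

fn on_timeline_shutdown(tenant: &str, shard: &str, timeline: &str) {
    // Without this, the dead timeline's series would be exported forever.
    let _ = EXAMPLE_WAIT_TIME.remove_label_values(&[tenant, shard, timeline]);
}
```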

View File

@@ -575,10 +575,7 @@ enum BatchedFeMessage {
} }
impl BatchedFeMessage { impl BatchedFeMessage {
async fn throttle_and_record_start_processing( async fn throttle(&mut self, cancel: &CancellationToken) -> Result<(), QueryError> {
&mut self,
cancel: &CancellationToken,
) -> Result<(), QueryError> {
let (shard, tokens, timers) = match self { let (shard, tokens, timers) = match self {
BatchedFeMessage::Exists { shard, timer, .. } BatchedFeMessage::Exists { shard, timer, .. }
| BatchedFeMessage::Nblocks { shard, timer, .. } | BatchedFeMessage::Nblocks { shard, timer, .. }
@@ -606,7 +603,7 @@ impl BatchedFeMessage {
} }
}; };
for timer in timers { for timer in timers {
timer.observe_throttle_done_execution_starting(&throttled); timer.deduct_throttle(&throttled);
} }
Ok(()) Ok(())
} }
@@ -1020,8 +1017,10 @@ impl PageServerHandler {
// Map handler result to protocol behavior. // Map handler result to protocol behavior.
// Some handler errors cause exit from pagestream protocol. // Some handler errors cause exit from pagestream protocol.
// Other handler errors are sent back as an error message and we stay in pagestream protocol. // Other handler errors are sent back as an error message and we stay in pagestream protocol.
let mut timers: smallvec::SmallVec<[_; 1]> =
smallvec::SmallVec::with_capacity(handler_results.len());
for handler_result in handler_results { for handler_result in handler_results {
let (response_msg, timer) = match handler_result { let response_msg = match handler_result {
Err(e) => match &e { Err(e) => match &e {
PageStreamError::Shutdown => { PageStreamError::Shutdown => {
// If we fail to fulfil a request during shutdown, which may be _because_ of // If we fail to fulfil a request during shutdown, which may be _because_ of
@@ -1045,66 +1044,34 @@ impl PageServerHandler {
span.in_scope(|| { span.in_scope(|| {
error!("error reading relation or page version: {full:#}") error!("error reading relation or page version: {full:#}")
}); });
( PagestreamBeMessage::Error(PagestreamErrorResponse {
PagestreamBeMessage::Error(PagestreamErrorResponse { message: e.to_string(),
message: e.to_string(), })
}),
None, // TODO: measure errors
)
} }
}, },
Ok((response_msg, timer)) => (response_msg, Some(timer)), Ok((response_msg, timer)) => {
// Extending the lifetime of the timers so observations on drop
// include the flush time.
timers.push(timer);
response_msg
}
}; };
//
// marshal & transmit response message // marshal & transmit response message
//
pgb_writer.write_message_noflush(&BeMessage::CopyData(&response_msg.serialize()))?; pgb_writer.write_message_noflush(&BeMessage::CopyData(&response_msg.serialize()))?;
// We purposefully don't count flush time into the timer.
//
// The reason is that current compute client will not perform protocol processing
// if the postgres backend process is doing things other than `->smgr_read()`.
// This is especially the case for prefetch.
//
// If the compute doesn't read from the connection, eventually TCP will backpressure
// all the way into our flush call below.
//
// The timer's underlying metric is used for a storage-internal latency SLO and
// we don't want to include latency in it that we can't control.
// And as pointed out above, in this case, we don't control the time that flush will take.
let flushing_timer =
timer.map(|timer| timer.observe_smgr_op_completion_and_start_flushing());
// what we want to do
let flush_fut = pgb_writer.flush();
// metric for how long flushing takes
let flush_fut = match flushing_timer {
Some(flushing_timer) => {
futures::future::Either::Left(flushing_timer.measure(flush_fut))
}
None => futures::future::Either::Right(flush_fut),
};
// do it while respecting cancellation
let _: () = async move {
tokio::select! {
biased;
_ = cancel.cancelled() => {
// We were requested to shut down.
info!("shutdown request received in page handler");
return Err(QueryError::Shutdown)
}
res = flush_fut => {
res?;
}
}
Ok(())
}
// and log the info! line inside the request span
.instrument(span.clone())
.await?;
} }
tokio::select! {
biased;
_ = cancel.cancelled() => {
// We were requested to shut down.
info!("shutdown request received in page handler");
return Err(QueryError::Shutdown)
}
res = pgb_writer.flush() => {
res?;
}
}
drop(timers);
Ok(()) Ok(())
} }
@@ -1233,7 +1200,7 @@ impl PageServerHandler {
} }
}; };
if let Err(cancelled) = msg.throttle_and_record_start_processing(&self.cancel).await { if let Err(cancelled) = msg.throttle(&self.cancel).await {
break cancelled; break cancelled;
} }
@@ -1400,9 +1367,7 @@ impl PageServerHandler {
return Err(e); return Err(e);
} }
}; };
batch batch.throttle(&self.cancel).await?;
.throttle_and_record_start_processing(&self.cancel)
.await?;
self.pagesteam_handle_batched_message(pgb_writer, batch, &cancel, &ctx) self.pagesteam_handle_batched_message(pgb_writer, batch, &cancel, &ctx)
.await?; .await?;
} }

View File

@@ -1242,7 +1242,7 @@ pub struct DatadirModification<'a> {
pending_metadata_bytes: usize, pending_metadata_bytes: usize,
} }
impl DatadirModification<'_> { impl<'a> DatadirModification<'a> {
// When a DatadirModification is committed, we do a monolithic serialization of all its contents. WAL records can // When a DatadirModification is committed, we do a monolithic serialization of all its contents. WAL records can
// contain multiple pages, so the pageserver's record-based batch size isn't sufficient to bound this allocation: we // contain multiple pages, so the pageserver's record-based batch size isn't sufficient to bound this allocation: we
// additionally specify a limit on how much payload a DatadirModification may contain before it should be committed. // additionally specify a limit on how much payload a DatadirModification may contain before it should be committed.
@@ -1263,7 +1263,7 @@ impl DatadirModification<'_> {
pub(crate) fn has_dirty_data(&self) -> bool { pub(crate) fn has_dirty_data(&self) -> bool {
self.pending_data_batch self.pending_data_batch
.as_ref() .as_ref()
.is_some_and(|b| b.has_data()) .map_or(false, |b| b.has_data())
} }
/// Set the current lsn /// Set the current lsn
@@ -1319,23 +1319,18 @@ impl DatadirModification<'_> {
let buf: Bytes = SlruSegmentDirectory::ser(&SlruSegmentDirectory::default())?.into(); let buf: Bytes = SlruSegmentDirectory::ser(&SlruSegmentDirectory::default())?.into();
let empty_dir = Value::Image(buf); let empty_dir = Value::Image(buf);
// Initialize SLRUs on shard 0 only: creating these on other shards would be
// harmless but they'd just be dropped on later compaction.
if self.tline.tenant_shard_id.is_shard_zero() {
self.put(slru_dir_to_key(SlruKind::Clog), empty_dir.clone());
self.pending_directory_entries
.push((DirectoryKind::SlruSegment(SlruKind::Clog), 0));
self.put(
slru_dir_to_key(SlruKind::MultiXactMembers),
empty_dir.clone(),
);
self.pending_directory_entries
.push((DirectoryKind::SlruSegment(SlruKind::Clog), 0));
self.put(slru_dir_to_key(SlruKind::MultiXactOffsets), empty_dir);
self.pending_directory_entries
.push((DirectoryKind::SlruSegment(SlruKind::MultiXactOffsets), 0));
}

self.put(slru_dir_to_key(SlruKind::Clog), empty_dir.clone());
self.pending_directory_entries
.push((DirectoryKind::SlruSegment(SlruKind::Clog), 0));
self.put(
slru_dir_to_key(SlruKind::MultiXactMembers),
empty_dir.clone(),
);
self.pending_directory_entries
.push((DirectoryKind::SlruSegment(SlruKind::Clog), 0));
self.put(slru_dir_to_key(SlruKind::MultiXactOffsets), empty_dir);
self.pending_directory_entries
.push((DirectoryKind::SlruSegment(SlruKind::MultiXactOffsets), 0));
Ok(()) Ok(())
} }
@@ -2230,7 +2225,7 @@ impl DatadirModification<'_> {
assert!(!self assert!(!self
.pending_data_batch .pending_data_batch
.as_ref() .as_ref()
.is_some_and(|b| b.updates_key(&key))); .map_or(false, |b| b.updates_key(&key)));
} }
} }
@@ -2299,7 +2294,7 @@ pub enum Version<'a> {
Modified(&'a DatadirModification<'a>), Modified(&'a DatadirModification<'a>),
} }
impl Version<'_> { impl<'a> Version<'a> {
async fn get( async fn get(
&self, &self,
timeline: &Timeline, timeline: &Timeline,

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff