mirror of https://github.com/neondatabase/neon.git (synced 2026-03-29 13:10:37 +00:00)

Compare commits: release-74 ... conrad/pro (4 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 01475c9e75 | |
| | c835bbba1f | |
| | f94dde4432 | |
| | 4991a85704 | |

.github/actionlint.yml (vendored): 4 lines changed

@@ -21,7 +21,3 @@ config-variables:
   - SLACK_UPCOMING_RELEASE_CHANNEL_ID
   - DEV_AWS_OIDC_ROLE_ARN
   - BENCHMARK_INGEST_TARGET_PROJECTID
-  - PGREGRESS_PG16_PROJECT_ID
-  - PGREGRESS_PG17_PROJECT_ID
-  - SLACK_ON_CALL_QA_STAGING_STREAM
-  - DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN

.github/actions/allure-report-generate/action.yml (file header not captured in this view; name inferred from the allure-report content below)

@@ -7,9 +7,10 @@ inputs:
     type: boolean
     required: false
     default: false
-  aws-oicd-role-arn:
-    description: 'OIDC role arn to interract with S3'
-    required: true
+  aws_oicd_role_arn:
+    description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
+    required: false
+    default: ''

 outputs:
   base-url:

@@ -83,11 +84,12 @@ runs:
       ALLURE_VERSION: 2.27.0
       ALLURE_ZIP_SHA256: b071858fb2fa542c65d8f152c5c40d26267b2dfb74df1f1608a589ecca38e777

-    - uses: aws-actions/configure-aws-credentials@v4
-      if: ${{ !cancelled() }}
+    - name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
+      if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
+      uses: aws-actions/configure-aws-credentials@v4
       with:
         aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+        role-to-assume: ${{ inputs.aws_oicd_role_arn }}
         role-duration-seconds: 3600 # 1 hour should be more than enough to upload report

     # Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this

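For orientation, this is roughly how a workflow calls the action once the input is renamed and made optional. The step below is lifted from the benchmarking.yml hunks further down; passing an empty (or no) `aws_oicd_role_arn` makes the action skip its own credential (re-)configuration and rely on whatever AWS credentials the job already has.

```yaml
# Sketch of a caller after the rename; values come from the diffs in this comparison.
- name: Create Allure report
  if: ${{ !cancelled() }}
  uses: ./.github/actions/allure-report-generate
  with:
    store-test-results-into-db: true
    aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
  env:
    REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
```
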
.github/actions/allure-report-store/action.yml (vendored): 14 lines changed

@@ -8,9 +8,10 @@ inputs:
   unique-key:
     description: 'string to distinguish different results in the same run'
     required: true
-  aws-oicd-role-arn:
-    description: 'OIDC role arn to interract with S3'
-    required: true
+  aws_oicd_role_arn:
+    description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
+    required: false
+    default: ''

 runs:
   using: "composite"

@@ -35,11 +36,12 @@ runs:
       env:
         REPORT_DIR: ${{ inputs.report-dir }}

-    - uses: aws-actions/configure-aws-credentials@v4
-      if: ${{ !cancelled() }}
+    - name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
+      if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
+      uses: aws-actions/configure-aws-credentials@v4
       with:
         aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+        role-to-assume: ${{ inputs.aws_oicd_role_arn }}
         role-duration-seconds: 3600 # 1 hour should be more than enough to upload report

     - name: Upload test results

.github/actions/download/action.yml (vendored): 9 lines changed

@@ -15,19 +15,10 @@ inputs:
   prefix:
     description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
     required: false
-  aws-oicd-role-arn:
-    description: 'OIDC role arn to interract with S3'
-    required: true

 runs:
   using: "composite"
   steps:
-    - uses: aws-actions/configure-aws-credentials@v4
-      with:
-        aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
-        role-duration-seconds: 3600
-
     - name: Download artifact
       id: download-artifact
       shell: bash -euxo pipefail {0}

.github/actions/run-python-test-set/action.yml (vendored): 20 lines changed

@@ -48,9 +48,10 @@ inputs:
     description: 'benchmark durations JSON'
     required: false
     default: '{}'
-  aws-oicd-role-arn:
-    description: 'OIDC role arn to interract with S3'
-    required: true
+  aws_oicd_role_arn:
+    description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
+    required: false
+    default: ''

 runs:
   using: "composite"

@@ -61,7 +62,6 @@ runs:
       with:
         name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
         path: /tmp/neon
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

     - name: Download Neon binaries for the previous release
       if: inputs.build_type != 'remote'

@@ -70,7 +70,6 @@
         name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
         path: /tmp/neon-previous
         prefix: latest
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

     - name: Download compatibility snapshot
       if: inputs.build_type != 'remote'

@@ -82,7 +81,6 @@
         # The lack of compatibility snapshot (for example, for the new Postgres version)
         # shouldn't fail the whole job. Only relevant test should fail.
         skip-if-does-not-exist: true
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

     - name: Checkout
       if: inputs.needs_postgres_source == 'true'

@@ -220,19 +218,17 @@ runs:
         # The lack of compatibility snapshot shouldn't fail the job
         # (for example if we didn't run the test for non build-and-test workflow)
         skip-if-does-not-exist: true
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

-    - uses: aws-actions/configure-aws-credentials@v4
-      if: ${{ !cancelled() }}
+    - name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
+      if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
+      uses: aws-actions/configure-aws-credentials@v4
       with:
         aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+        role-to-assume: ${{ inputs.aws_oicd_role_arn }}
         role-duration-seconds: 3600 # 1 hour should be more than enough to upload report

     - name: Upload test results
       if: ${{ !cancelled() }}
       uses: ./.github/actions/allure-report-store
       with:
         report-dir: /tmp/test_output/allure/results
         unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

Composite action handling coverage-data-artifact (file header not captured in this view)

@@ -14,11 +14,9 @@ runs:
         name: coverage-data-artifact
         path: /tmp/coverage
         skip-if-does-not-exist: true # skip if there's no previous coverage to download
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

     - name: Upload coverage data
       uses: ./.github/actions/upload
       with:
         name: coverage-data-artifact
         path: /tmp/coverage
-        aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

.github/actions/upload/action.yml (vendored): 11 lines changed

@@ -14,10 +14,6 @@ inputs:
   prefix:
     description: "S3 prefix. Default is '${GITHUB_SHA}/${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
     required: false
-  aws-oicd-role-arn:
-    description: "the OIDC role arn for aws auth"
-    required: false
-    default: ""

 runs:
   using: "composite"

@@ -57,13 +53,6 @@ runs:

           echo 'SKIPPED=false' >> $GITHUB_OUTPUT

-    - name: Configure AWS credentials
-      uses: aws-actions/configure-aws-credentials@v4
-      with:
-        aws-region: eu-central-1
-        role-to-assume: ${{ inputs.aws-oicd-role-arn }}
-        role-duration-seconds: 3600
-
     - name: Upload artifact
       if: ${{ steps.prepare-artifact.outputs.SKIPPED == 'false' }}
       shell: bash -euxo pipefail {0}

Benchmarking workflow with a benchmark_restore_status step (file header not captured in this view)

@@ -70,7 +70,6 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       # we create a table that has one row for each database that we want to restore with the status whether the restore is done
       - name: Create benchmark_restore_status table if it does not exist

.github/workflows/_build-and-test-locally.yml (vendored): 20 lines changed

@@ -31,13 +31,12 @@ defaults:
 env:
   RUST_BACKTRACE: 1
   COPT: '-Werror'
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}

 jobs:
   build-neon:
     runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      contents: read
     container:
       image: ${{ inputs.build-tools-image }}
       credentials:

@@ -206,13 +205,6 @@ jobs:
           done
           fi

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 18000 # 5 hours
-
       - name: Run rust tests
         env:
           NEXTEST_RETRIES: 3

@@ -264,7 +256,6 @@ jobs:
         with:
           name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-artifact
           path: /tmp/neon
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       # XXX: keep this after the binaries.list is formed, so the coverage can properly work later
       - name: Merge and upload coverage data

@@ -274,10 +265,6 @@ jobs:
   regress-tests:
     # Don't run regression tests on debug arm64 builds
     if: inputs.build-type != 'debug' || inputs.arch != 'arm64'
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      contents: read
-      statuses: write
     needs: [ build-neon ]
     runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
     container:

@@ -296,7 +283,7 @@ jobs:
           submodules: true

       - name: Pytest regression tests
-        continue-on-error: ${{ matrix.lfc_state == 'with-lfc' && inputs.build-type == 'debug' }}
+        continue-on-error: ${{ matrix.lfc_state == 'with-lfc' }}
         uses: ./.github/actions/run-python-test-set
         timeout-minutes: 60
         with:

@@ -308,7 +295,6 @@ jobs:
           real_s3_region: eu-central-1
           rerun_failed: true
           pg_version: ${{ matrix.pg_version }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
           CHECK_ONDISK_DATA_COMPATIBILITY: nonempty

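Taken together, the workflow hunks above and below swap between two AWS authentication styles that recur throughout this comparison. The sketch below is illustrative only: the job names and trivial `run` steps are invented for the example, while the actions, variables, secrets, and the bucket name are the ones that appear in these diffs.

```yaml
name: credential-style-sketch
on: workflow_dispatch

jobs:
  with-oidc: # style on the removed side: short-lived credentials via OIDC role assumption
    runs-on: ubuntu-22.04
    permissions:
      id-token: write # required so configure-aws-credentials can exchange the OIDC token
    steps:
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: eu-central-1
          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
          role-duration-seconds: 3600
      # any later step in this job sees the temporary credentials
      - run: aws s3 ls s3://neon-github-public-dev

  with-static-keys: # style on the added side: long-lived keys injected through env
    runs-on: ubuntu-22.04
    env:
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
    steps:
      # the AWS CLI and SDKs pick the keys up from the environment
      - run: aws s3 ls s3://neon-github-public-dev
```
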
.github/workflows/benchmarking.yml (vendored): 67 lines changed

@@ -105,7 +105,6 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Create Neon Project
         id: create-neon-project

@@ -123,7 +122,7 @@ jobs:
           run_in_parallel: false
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
           # Set --sparse-ordering option of pytest-order plugin
           # to ensure tests are running in order of appears in the file.
           # It's important for test_perf_pgbench.py::test_pgbench_remote_* tests

@@ -153,7 +152,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}

@@ -205,7 +204,6 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Run Logical Replication benchmarks
         uses: ./.github/actions/run-python-test-set

@@ -216,7 +214,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 5400
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -233,7 +231,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 5400
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -245,7 +243,7 @@ jobs:
         uses: ./.github/actions/allure-report-generate
         with:
           store-test-results-into-db: true
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

@@ -308,7 +306,6 @@ jobs:
           "image": [ "'"$image_default"'" ],
           "include": [{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_default"', "image": "'"$image_default"'" },
                       { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
-                      { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new-many-tables","db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
                       { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
                       { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
                       { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-new", "db_size": "10gb","runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },

@@ -408,10 +405,9 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Create Neon Project
-        if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-new-many-tables", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
+        if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
         id: create-neon-project
         uses: ./.github/actions/neon-project-create
         with:

@@ -430,7 +426,7 @@ jobs:
             neonvm-captest-sharding-reuse)
               CONNSTR=${{ secrets.BENCHMARK_CAPTEST_SHARDING_CONNSTR }}
               ;;
-            neonvm-captest-new | neonvm-captest-new-many-tables | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
+            neonvm-captest-new | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
               CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
               ;;
             rds-aurora)

@@ -447,26 +443,6 @@ jobs:
           echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

-      # we want to compare Neon project OLTP throughput and latency at scale factor 10 GB
-      # without (neonvm-captest-new)
-      # and with (neonvm-captest-new-many-tables) many relations in the database
-      - name: Create many relations before the run
-        if: contains(fromJson('["neonvm-captest-new-many-tables"]'), matrix.platform)
-        uses: ./.github/actions/run-python-test-set
-        with:
-          build_type: ${{ env.BUILD_TYPE }}
-          test_selection: performance
-          run_in_parallel: false
-          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
-          extra_params: -m remote_cluster --timeout 21600 -k test_perf_many_relations
-          pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-        env:
-          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
-          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
-          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
-          TEST_NUM_RELATIONS: 10000
-
       - name: Benchmark init
         uses: ./.github/actions/run-python-test-set
         with:

@@ -476,7 +452,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"

@@ -491,7 +467,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"

@@ -506,7 +482,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"

@@ -524,7 +500,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}

@@ -635,7 +611,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -650,7 +626,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"

@@ -661,7 +637,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}

@@ -732,7 +708,6 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Set up Connection String
         id: set-up-connstr

@@ -764,7 +739,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 43200 -k test_clickbench
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -778,7 +753,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}

@@ -843,7 +818,6 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Get Connstring Secret Name
         run: |

@@ -882,7 +856,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_tpch
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -894,7 +868,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}

@@ -952,7 +926,6 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Set up Connection String
         id: set-up-connstr

@@ -984,7 +957,7 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -995,7 +968,7 @@ jobs:
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
         with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}

.github/workflows/build_and_test.yml (vendored): 223 lines changed

@@ -21,6 +21,8 @@ concurrency:
 env:
   RUST_BACKTRACE: 1
   COPT: '-Werror'
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
   # A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
   E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}

@@ -212,7 +214,7 @@ jobs:
           fi
           echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
       - name: Run cargo clippy (debug)
-        run: cargo hack --features default --ignore-unknown-features --feature-powerset clippy $CLIPPY_COMMON_ARGS
+        run: cargo hack --feature-powerset clippy $CLIPPY_COMMON_ARGS

       - name: Check documentation generation
         run: cargo doc --workspace --no-deps --document-private-items

@@ -253,15 +255,15 @@ jobs:
       build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
       build-tag: ${{ needs.tag.outputs.build-tag }}
       build-type: ${{ matrix.build-type }}
-      # Run tests on all Postgres versions in release builds and only on the latest version in debug builds.
-      # Run without LFC on v17 release and debug builds only. For all the other cases LFC is enabled.
+      # Run tests on all Postgres versions in release builds and only on the latest version in debug builds
+      # run without LFC on v17 release only
       test-cfg: |
-        ${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "with-lfc"},
-                                               {"pg_version":"v15", "lfc_state": "with-lfc"},
-                                               {"pg_version":"v16", "lfc_state": "with-lfc"},
-                                               {"pg_version":"v17", "lfc_state": "with-lfc"},
-                                               {"pg_version":"v17", "lfc_state": "without-lfc"}]'
-           || '[{"pg_version":"v17", "lfc_state": "without-lfc" }]' }}
+        ${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "without-lfc"},
+                                               {"pg_version":"v15", "lfc_state": "without-lfc"},
+                                               {"pg_version":"v16", "lfc_state": "without-lfc"},
+                                               {"pg_version":"v17", "lfc_state": "without-lfc"},
+                                               {"pg_version":"v17", "lfc_state": "with-lfc"}]'
+           || '[{"pg_version":"v17", "lfc_state": "without-lfc"}]' }}
     secrets: inherit

   # Keep `benchmarks` job outside of `build-and-test-locally` workflow to make job failures non-blocking

@@ -303,11 +305,6 @@ jobs:
   benchmarks:
     if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
     needs: [ check-permissions, build-and-test-locally, build-build-tools-image, get-benchmarks-durations ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     runs-on: [ self-hosted, small ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm

@@ -336,7 +333,6 @@ jobs:
           extra_params: --splits 5 --group ${{ matrix.pytest_split_group }}
           benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
           pg_version: v16
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -349,11 +345,6 @@ jobs:
   report-benchmarks-failures:
     needs: [ benchmarks, create-test-report ]
     if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure'
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     runs-on: ubuntu-22.04

     steps:

@@ -369,11 +360,6 @@ jobs:
   create-test-report:
     needs: [ check-permissions, build-and-test-locally, coverage-report, build-build-tools-image, benchmarks ]
     if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     outputs:
       report-url: ${{ steps.create-allure-report.outputs.report-url }}

@@ -394,7 +380,6 @@ jobs:
         uses: ./.github/actions/allure-report-generate
         with:
           store-test-results-into-db: true
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

@@ -426,10 +411,6 @@ jobs:
   coverage-report:
     if: ${{ !startsWith(github.ref_name, 'release') }}
     needs: [ check-permissions, build-build-tools-image, build-and-test-locally ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
     runs-on: [ self-hosted, small ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm

@@ -456,14 +437,12 @@ jobs:
         with:
           name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact
           path: /tmp/neon
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Get coverage artifact
         uses: ./.github/actions/download
         with:
           name: coverage-data-artifact
           path: /tmp/coverage
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Merge coverage data
         run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge

@@ -594,10 +573,6 @@ jobs:
   neon-image:
     needs: [ neon-image-arch, tag ]
     runs-on: ubuntu-22.04
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read

     steps:
       - uses: docker/login-action@v3

@@ -612,15 +587,11 @@ jobs:
             neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-x64 \
             neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-arm64

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
         with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-
-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}

       - name: Push multi-arch image to ECR
         run: |

@@ -629,10 +600,6 @@ jobs:

   compute-node-image-arch:
     needs: [ check-permissions, build-build-tools-image, tag ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     strategy:
       fail-fast: false
       matrix:

@@ -673,15 +640,11 @@ jobs:
           username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
           password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
         with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-
-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}

       - uses: docker/login-action@v3
         with:

@@ -754,10 +717,6 @@ jobs:

   compute-node-image:
     needs: [ compute-node-image-arch, tag ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     runs-on: ubuntu-22.04

     strategy:

@@ -802,15 +761,11 @@ jobs:
             neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-x64 \
             neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
         with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-
-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}

       - name: Push multi-arch compute-node-${{ matrix.version.pg }} image to ECR
         run: |

@@ -840,7 +795,7 @@ jobs:
           - pg: v17
             debian: bookworm
     env:
-      VM_BUILDER_VERSION: v0.37.1
+      VM_BUILDER_VERSION: v0.35.0

     steps:
       - uses: actions/checkout@v4

@@ -935,9 +890,7 @@ jobs:
     runs-on: ubuntu-22.04

     permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
+      id-token: write # for `aws-actions/configure-aws-credentials`

     env:
       VERSIONS: v14 v15 v16 v17

@@ -948,15 +901,12 @@ jobs:
           username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
           password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - name: Login to dev ECR
+        uses: docker/login-action@v3
         with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-
-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}

       - name: Copy vm-compute-node images to ECR
         run: |

@@ -1035,11 +985,6 @@ jobs:
   trigger-custom-extensions-build-and-wait:
     needs: [ check-permissions, tag ]
     runs-on: ubuntu-22.04
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     steps:
       - name: Set PR's status to pending and request a remote CI test
         run: |

@@ -1115,79 +1060,12 @@ jobs:
     needs: [ check-permissions, promote-images, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
     # `!failure() && !cancelled()` is required because the workflow depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
     if: (github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute') && !failure() && !cancelled()
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
     runs-on: [ self-hosted, small ]
     container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
     steps:
       - uses: actions/checkout@v4

-      - name: Create git tag and GitHub release
-        if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-        uses: actions/github-script@v7
-        with:
-          retries: 5
-          script: |
-            const tag = "${{ needs.tag.outputs.build-tag }}";
-
-            try {
-              const existingRef = await github.rest.git.getRef({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                ref: `tags/${tag}`,
-              });
-
-              if (existingRef.data.object.sha !== context.sha) {
-                throw new Error(`Tag ${tag} already exists but points to a different commit (expected: ${context.sha}, actual: ${existingRef.data.object.sha}).`);
-              }
-
-              console.log(`Tag ${tag} already exists and points to ${context.sha} as expected.`);
-            } catch (error) {
-              if (error.status !== 404) {
-                throw error;
-              }
-
-              console.log(`Tag ${tag} does not exist. Creating it...`);
-              await github.rest.git.createRef({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                ref: `refs/tags/${tag}`,
-                sha: context.sha,
-              });
-              console.log(`Tag ${tag} created successfully.`);
-            }
-
-            // TODO: check how GitHub releases looks for proxy/compute releases and enable them if they're ok
-            if (context.ref !== 'refs/heads/release') {
-              console.log(`GitHub release skipped for ${context.ref}.`);
-              return;
-            }
-
-            try {
-              const existingRelease = await github.rest.repos.getReleaseByTag({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                tag: tag,
-              });
-
-              console.log(`Release for tag ${tag} already exists (ID: ${existingRelease.data.id}).`);
-            } catch (error) {
-              if (error.status !== 404) {
-                throw error;
-              }
-
-              console.log(`Release for tag ${tag} does not exist. Creating it...`);
-              await github.rest.repos.createRelease({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                tag_name: tag,
-                generate_release_notes: true,
-              });
-              console.log(`Release for tag ${tag} created successfully.`);
-            }
-
       - name: Trigger deploy workflow
         env:
           GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}

@@ -1237,13 +1115,38 @@ jobs:
             exit 1
           fi

+      - name: Create git tag
+        if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
+        uses: actions/github-script@v7
+        with:
+          # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
+          retries: 5
+          script: |
+            await github.rest.git.createRef({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              ref: "refs/tags/${{ needs.tag.outputs.build-tag }}",
+              sha: context.sha,
+            })
+
+      # TODO: check how GitHub releases looks for proxy releases and enable it if it's ok
+      - name: Create GitHub release
+        if: github.ref_name == 'release'
+        uses: actions/github-script@v7
+        with:
+          # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
+          retries: 5
+          script: |
+            await github.rest.repos.createRelease({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              tag_name: "${{ needs.tag.outputs.build-tag }}",
+              generate_release_notes: true,
+            })
+
   # The job runs on `release` branch and copies compatibility data and Neon artifact from the last *release PR* to the latest directory
   promote-compatibility-data:
     needs: [ deploy ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     # `!failure() && !cancelled()` is required because the workflow transitively depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
     if: github.ref_name == 'release' && !failure() && !cancelled()

@@ -1280,12 +1183,6 @@ jobs:
           echo "run-id=${run_id}" | tee -a ${GITHUB_OUTPUT}
           echo "commit-sha=${last_commit_sha}" | tee -a ${GITHUB_OUTPUT}

-      - uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-
       - name: Promote compatibility snapshot and Neon artifact
         env:
           BUCKET: neon-github-public-dev

46  .github/workflows/cloud-regress.yml  (vendored)

@@ -19,21 +19,15 @@ concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: true

- permissions:
- id-token: write # aws-actions/configure-aws-credentials
- statuses: write
- contents: write
-
  jobs:
  regress:
  env:
  POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+ DEFAULT_PG_VERSION: 16
  TEST_OUTPUT: /tmp/test_output
  BUILD_TYPE: remote
- strategy:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
- fail-fast: false
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
- matrix:
- pg-version: [16, 17]

  runs-on: us-east-2
  container:

@@ -46,11 +40,9 @@ jobs:
  submodules: true

  - name: Patch the test
- env:
- PG_VERSION: ${{matrix.pg-version}}
  run: |
- cd "vendor/postgres-v${PG_VERSION}"
+ cd "vendor/postgres-v${DEFAULT_PG_VERSION}"
- patch -p1 < "../../compute/patches/cloud_regress_pg${PG_VERSION}.patch"
+ patch -p1 < "../../compute/patches/cloud_regress_pg${DEFAULT_PG_VERSION}.patch"

  - name: Generate a random password
  id: pwgen

@@ -63,9 +55,8 @@ jobs:
  - name: Change tests according to the generated password
  env:
  DBPASS: ${{ steps.pwgen.outputs.DBPASS }}
- PG_VERSION: ${{matrix.pg-version}}
  run: |
- cd vendor/postgres-v"${PG_VERSION}"/src/test/regress
+ cd vendor/postgres-v"${DEFAULT_PG_VERSION}"/src/test/regress
  for fname in sql/*.sql expected/*.out; do
  sed -i.bak s/NEON_PASSWORD_PLACEHOLDER/"'${DBPASS}'"/ "${fname}"
  done

@@ -81,46 +72,27 @@ jobs:
  name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
  path: /tmp/neon/
  prefix: latest
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-
- - name: Create a new branch
- id: create-branch
- uses: ./.github/actions/neon-branch-create
- with:
- api_key: ${{ secrets.NEON_STAGING_API_KEY }}
- project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}

  - name: Run the regression tests
  uses: ./.github/actions/run-python-test-set
  with:
  build_type: ${{ env.BUILD_TYPE }}
  test_selection: cloud_regress
- pg_version: ${{matrix.pg-version}}
+ pg_version: ${{ env.DEFAULT_PG_VERSION }}
  extra_params: -m remote_cluster
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
  env:
- BENCHMARK_CONNSTR: ${{steps.create-branch.outputs.dsn}}
+ BENCHMARK_CONNSTR: ${{ secrets.PG_REGRESS_CONNSTR }}

- - name: Delete branch
- if: always()
- uses: ./.github/actions/neon-branch-delete
- with:
- api_key: ${{ secrets.NEON_STAGING_API_KEY }}
- project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
- branch_id: ${{steps.create-branch.outputs.branch_id}}
-
  - name: Create Allure report
  id: create-allure-report
  if: ${{ !cancelled() }}
  uses: ./.github/actions/allure-report-generate
- with:
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

  - name: Post to a Slack channel
  if: ${{ github.event.schedule && failure() }}
  uses: slackapi/slack-github-action@v1
  with:
- channel-id: ${{ vars.SLACK_ON_CALL_QA_STAGING_STREAM }}
+ channel-id: "C033QLM5P7D" # on-call-staging-stream
  slack-message: |
  Periodic pg_regress on staging: ${{ job.status }}
  <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>
3  .github/workflows/ingest_benchmark.yml  (vendored)

@@ -64,7 +64,6 @@ jobs:
  name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
  path: /tmp/neon/
  prefix: latest
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

  - name: Create Neon Project
  if: ${{ matrix.target_project == 'new_empty_project' }}

@@ -132,7 +131,7 @@ jobs:
  extra_params: -s -m remote_cluster --timeout 86400 -k test_ingest_performance_using_pgcopydb
  pg_version: v16
  save_perf_report: true
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+ aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
  env:
  BENCHMARK_INGEST_SOURCE_CONNSTR: ${{ secrets.BENCHMARK_INGEST_SOURCE_CONNSTR }}
  TARGET_PROJECT_TYPE: ${{ matrix.target_project }}
13  .github/workflows/neon_extra_builds.yml  (vendored)

@@ -143,10 +143,6 @@ jobs:

  gather-rust-build-stats:
  needs: [ check-permissions, build-build-tools-image ]
- permissions:
- id-token: write # aws-actions/configure-aws-credentials
- statuses: write
- contents: write
  if: |
  contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
  contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||

@@ -181,18 +177,13 @@ jobs:
  - name: Produce the build stats
  run: PQ_LIB_DIR=$(pwd)/pg_install/v17/lib cargo build --all --release --timings -j$(nproc)

- - name: Configure AWS credentials
- uses: aws-actions/configure-aws-credentials@v4
- with:
- aws-region: eu-central-1
- role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- role-duration-seconds: 3600
-
  - name: Upload the build stats
  id: upload-stats
  env:
  BUCKET: neon-github-public-dev
  SHA: ${{ github.event.pull_request.head.sha || github.sha }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
  run: |
  REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
  aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"
27  .github/workflows/periodic_pagebench.yml  (vendored)

@@ -27,11 +27,6 @@ concurrency:

  jobs:
  trigger_bench_on_ec2_machine_in_eu_central_1:
- permissions:
- id-token: write # aws-actions/configure-aws-credentials
- statuses: write
- contents: write
- pull-requests: write
  runs-on: [ self-hosted, small ]
  container:
  image: neondatabase/build-tools:pinned-bookworm

@@ -43,6 +38,8 @@ jobs:
  env:
  API_KEY: ${{ secrets.PERIODIC_PAGEBENCH_EC2_RUNNER_API_KEY }}
  RUN_ID: ${{ github.run_id }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY : ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_SECRET }}
  AWS_DEFAULT_REGION : "eu-central-1"
  AWS_INSTANCE_ID : "i-02a59a3bf86bc7e74"
  steps:

@@ -53,13 +50,6 @@ jobs:
  - name: Show my own (github runner) external IP address - usefull for IP allowlisting
  run: curl https://ifconfig.me

- - name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
- uses: aws-actions/configure-aws-credentials@v4
- with:
- aws-region: eu-central-1
- role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
- role-duration-seconds: 3600
-
  - name: Start EC2 instance and wait for the instance to boot up
  run: |
  aws ec2 start-instances --instance-ids $AWS_INSTANCE_ID

@@ -134,10 +124,11 @@ jobs:
  cat "test_log_${GITHUB_RUN_ID}"

  - name: Create Allure report
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
  if: ${{ !cancelled() }}
  uses: ./.github/actions/allure-report-generate
- with:
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

  - name: Post to a Slack channel
  if: ${{ github.event.schedule && failure() }}

@@ -157,14 +148,6 @@ jobs:
  -H "Authorization: Bearer $API_KEY" \
  -d ''

- - name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
- if: always() && steps.poll_step.outputs.too_many_runs != 'true'
- uses: aws-actions/configure-aws-credentials@v4
- with:
- aws-region: eu-central-1
- role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
- role-duration-seconds: 3600
-
  - name: Stop EC2 instance and wait for the instance to be stopped
  if: always() && steps.poll_step.outputs.too_many_runs != 'true'
  run: |
12  .github/workflows/pg-clients.yml  (vendored)

@@ -25,13 +25,11 @@ defaults:
  run:
  shell: bash -euxo pipefail {0}

- permissions:
- id-token: write # aws-actions/configure-aws-credentials
- statuses: write # require for posting a status update
-
  env:
  DEFAULT_PG_VERSION: 16
  PLATFORM: neon-captest-new
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
  AWS_DEFAULT_REGION: eu-central-1

  jobs:

@@ -96,7 +94,6 @@ jobs:
  name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
  path: /tmp/neon/
  prefix: latest
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

  - name: Create Neon Project
  id: create-neon-project

@@ -113,7 +110,6 @@ jobs:
  run_in_parallel: false
  extra_params: -m remote_cluster
  pg_version: ${{ env.DEFAULT_PG_VERSION }}
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
  env:
  BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}

@@ -130,7 +126,6 @@ jobs:
  uses: ./.github/actions/allure-report-generate
  with:
  store-test-results-into-db: true
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
  env:
  REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

@@ -164,7 +159,6 @@ jobs:
  name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
  path: /tmp/neon/
  prefix: latest
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

  - name: Create Neon Project
  id: create-neon-project

@@ -181,7 +175,6 @@ jobs:
  run_in_parallel: false
  extra_params: -m remote_cluster
  pg_version: ${{ env.DEFAULT_PG_VERSION }}
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
  env:
  BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}

@@ -198,7 +191,6 @@ jobs:
  uses: ./.github/actions/allure-report-generate
  with:
  store-test-results-into-db: true
- aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
  env:
  REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
14  .github/workflows/pin-build-tools-image.yml  (vendored)

@@ -67,7 +67,7 @@ jobs:
  runs-on: ubuntu-22.04

  permissions:
- id-token: write # for `azure/login` and aws auth
+ id-token: write # for `azure/login`

  steps:
  - uses: docker/login-action@v3

@@ -75,15 +75,11 @@ jobs:
  username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
  password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

- - name: Configure AWS credentials
+ - uses: docker/login-action@v3
- uses: aws-actions/configure-aws-credentials@v4
  with:
- aws-region: eu-central-1
+ registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
- role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+ username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
- role-duration-seconds: 3600
+ password: ${{ secrets.AWS_SECRET_KEY_DEV }}

- - name: Login to Amazon Dev ECR
- uses: aws-actions/amazon-ecr-login@v2
-
  - name: Azure login
  uses: azure/login@6c251865b4e6290e7b78be643ea2d005bc51f69a # @v2.1.1
1  .github/workflows/pre-merge-checks.yml  (vendored)

@@ -63,7 +63,6 @@ jobs:
  if: always()
  permissions:
  statuses: write # for `github.repos.createCommitStatus(...)`
- contents: write
  needs:
  - get-changed-files
  - check-codestyle-python
4  .github/workflows/release.yml  (vendored)

@@ -3,7 +3,7 @@ name: Create Release Branch
  on:
  schedule:
  # It should be kept in sync with if-condition in jobs
- - cron: '0 6 * * FRI' # Storage release
+ - cron: '0 6 * * MON' # Storage release
  - cron: '0 6 * * THU' # Proxy release
  workflow_dispatch:
  inputs:

@@ -29,7 +29,7 @@ defaults:

  jobs:
  create-storage-release-branch:
- if: ${{ github.event.schedule == '0 6 * * FRI' || inputs.create-storage-release-branch }}
+ if: ${{ github.event.schedule == '0 6 * * MON' || inputs.create-storage-release-branch }}

  permissions:
  contents: write
33  CODEOWNERS

@@ -1,29 +1,16 @@
- # Autoscaling
- /libs/vm_monitor/ @neondatabase/autoscaling
-
- # DevProd
  /.github/ @neondatabase/developer-productivity
+ /compute_tools/ @neondatabase/control-plane @neondatabase/compute
- # Compute
+ /libs/pageserver_api/ @neondatabase/storage
- /pgxn/ @neondatabase/compute
+ /libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
- /vendor/ @neondatabase/compute
- /compute/ @neondatabase/compute
- /compute_tools/ @neondatabase/compute
-
- # Proxy
  /libs/proxy/ @neondatabase/proxy
- /proxy/ @neondatabase/proxy
+ /libs/remote_storage/ @neondatabase/storage
+ /libs/safekeeper_api/ @neondatabase/storage
- # Storage
+ /libs/vm_monitor/ @neondatabase/autoscaling
  /pageserver/ @neondatabase/storage
+ /pgxn/ @neondatabase/compute
+ /pgxn/neon/ @neondatabase/compute @neondatabase/storage
+ /proxy/ @neondatabase/proxy
  /safekeeper/ @neondatabase/storage
  /storage_controller @neondatabase/storage
  /storage_scrubber @neondatabase/storage
- /libs/pageserver_api/ @neondatabase/storage
+ /vendor/ @neondatabase/compute
- /libs/remote_storage/ @neondatabase/storage
- /libs/safekeeper_api/ @neondatabase/storage
-
- # Shared
- /pgxn/neon/ @neondatabase/compute @neondatabase/storage
- /libs/compute_api/ @neondatabase/compute @neondatabase/control-plane
- /libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
640  Cargo.lock  (generated)
File diff suppressed because it is too large
13  Cargo.toml

@@ -11,7 +11,6 @@ members = [
  "pageserver/pagebench",
  "proxy",
  "safekeeper",
- "safekeeper/client",
  "storage_broker",
  "storage_controller",
  "storage_controller/client",

@@ -52,7 +51,10 @@ anyhow = { version = "1.0", features = ["backtrace"] }
  arc-swap = "1.6"
  async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] }
  atomic-take = "1.1.0"
- backtrace = "0.3.74"
+ azure_core = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
+ azure_identity = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
+ azure_storage = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
+ azure_storage_blobs = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
  flate2 = "1.0.26"
  async-stream = "0.3"
  async-trait = "0.1"

@@ -214,12 +216,6 @@ postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git",
  postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
  tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
-
- ## Azure SDK crates
- azure_core = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
- azure_identity = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
- azure_storage = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
- azure_storage_blobs = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }

  ## Local libraries
  compute_api = { version = "0.1", path = "./libs/compute_api/" }
  consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }

@@ -235,7 +231,6 @@ postgres_initdb = { path = "./libs/postgres_initdb" }
  pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
  remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
  safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
- safekeeper_client = { path = "./safekeeper/client" }
  desim = { version = "0.1", path = "./libs/desim" }
  storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
  storage_controller_client = { path = "./storage_controller/client" }
@@ -115,7 +115,7 @@ RUN set -e \

  # Keep the version the same as in compute/compute-node.Dockerfile and
  # test_runner/regress/test_compute_metrics.py.
- ENV SQL_EXPORTER_VERSION=0.16.0
+ ENV SQL_EXPORTER_VERSION=0.13.1
  RUN curl -fsSL \
  "https://github.com/burningalchemist/sql_exporter/releases/download/${SQL_EXPORTER_VERSION}/sql_exporter-${SQL_EXPORTER_VERSION}.linux-$(case "$(uname -m)" in x86_64) echo amd64;; aarch64) echo arm64;; esac).tar.gz" \
  --output sql_exporter.tar.gz \
@@ -35,12 +35,10 @@ RUN case $DEBIAN_VERSION in \
  ;; \
  esac && \
  apt update && \
- apt install --no-install-recommends --no-install-suggests -y \
+ apt install --no-install-recommends -y git autoconf automake libtool build-essential bison flex libreadline-dev \
- ninja-build git autoconf automake libtool build-essential bison flex libreadline-dev \
  zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget ca-certificates pkg-config libssl-dev \
  libicu-dev libxslt1-dev liblz4-dev libzstd-dev zstd \
- $VERSION_INSTALLS \
+ $VERSION_INSTALLS
- && apt clean && rm -rf /var/lib/apt/lists/*

  #########################################################################################
  #

@@ -115,12 +113,10 @@ ARG DEBIAN_VERSION
  ARG PG_VERSION
  COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
  RUN apt update && \
- apt install --no-install-recommends --no-install-suggests -y \
+ apt install --no-install-recommends -y gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
- gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
  libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
  libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
- protobuf-c-compiler xsltproc \
+ protobuf-c-compiler xsltproc
- && apt clean && rm -rf /var/lib/apt/lists/*


  # Postgis 3.5.0 requires SFCGAL 1.4+

@@ -147,9 +143,9 @@ RUN case "${DEBIAN_VERSION}" in \
  wget https://gitlab.com/sfcgal/SFCGAL/-/archive/v${SFCGAL_VERSION}/SFCGAL-v${SFCGAL_VERSION}.tar.gz -O SFCGAL.tar.gz && \
  echo "${SFCGAL_CHECKSUM} SFCGAL.tar.gz" | sha256sum --check && \
  mkdir sfcgal-src && cd sfcgal-src && tar xzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
- cmake -DCMAKE_BUILD_TYPE=Release -GNinja . && ninja -j $(getconf _NPROCESSORS_ONLN) && \
+ cmake -DCMAKE_BUILD_TYPE=Release . && make -j $(getconf _NPROCESSORS_ONLN) && \
- DESTDIR=/sfcgal ninja install -j $(getconf _NPROCESSORS_ONLN) && \
+ DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
- ninja clean && cp -R /sfcgal/* /
+ make clean && cp -R /sfcgal/* /

  ENV PATH="/usr/local/pgsql/bin:$PATH"

@@ -217,9 +213,9 @@ RUN case "${PG_VERSION}" in \
  echo "${PGROUTING_CHECKSUM} pgrouting.tar.gz" | sha256sum --check && \
  mkdir pgrouting-src && cd pgrouting-src && tar xzf ../pgrouting.tar.gz --strip-components=1 -C . && \
  mkdir build && cd build && \
- cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. && \
+ cmake -DCMAKE_BUILD_TYPE=Release .. && \
- ninja -j $(getconf _NPROCESSORS_ONLN) && \
+ make -j $(getconf _NPROCESSORS_ONLN) && \
- ninja -j $(getconf _NPROCESSORS_ONLN) install && \
+ make -j $(getconf _NPROCESSORS_ONLN) install && \
  echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control && \
  find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\
  cp /usr/local/pgsql/share/extension/pgrouting.control /extensions/postgis && \

@@ -239,9 +235,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
  COPY compute/patches/plv8-3.1.10.patch /plv8-3.1.10.patch

  RUN apt update && \
- apt install --no-install-recommends --no-install-suggests -y \
+ apt install --no-install-recommends -y ninja-build python3-dev libncurses5 binutils clang
- ninja-build python3-dev libncurses5 binutils clang \
- && apt clean && rm -rf /var/lib/apt/lists/*

  # plv8 3.2.3 supports v17
  # last release v3.2.3 - Sep 7, 2024

@@ -307,10 +301,9 @@ RUN mkdir -p /h3/usr/ && \
  echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \
  mkdir h3-src && cd h3-src && tar xzf ../h3.tar.gz --strip-components=1 -C . && \
  mkdir build && cd build && \
- cmake .. -GNinja -DBUILD_BENCHMARKS=0 -DCMAKE_BUILD_TYPE=Release \
+ cmake .. -DCMAKE_BUILD_TYPE=Release && \
- -DBUILD_FUZZERS=0 -DBUILD_FILTERS=0 -DBUILD_GENERATORS=0 -DBUILD_TESTING=0 \
+ make -j $(getconf _NPROCESSORS_ONLN) && \
- && ninja -j $(getconf _NPROCESSORS_ONLN) && \
+ DESTDIR=/h3 make install && \
- DESTDIR=/h3 ninja install && \
  cp -R /h3/usr / && \
  rm -rf build

@@ -657,15 +650,14 @@ FROM build-deps AS rdkit-pg-build
  ARG PG_VERSION
  COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

- RUN apt update && \
+ RUN apt-get update && \
- apt install --no-install-recommends --no-install-suggests -y \
+ apt-get install --no-install-recommends -y \
  libboost-iostreams1.74-dev \
  libboost-regex1.74-dev \
  libboost-serialization1.74-dev \
  libboost-system1.74-dev \
  libeigen3-dev \
- libboost-all-dev \
+ libboost-all-dev
- && apt clean && rm -rf /var/lib/apt/lists/*

  # rdkit Release_2024_09_1 supports v17
  # last release Release_2024_09_1 - Sep 27, 2024

@@ -701,8 +693,6 @@ RUN case "${PG_VERSION}" in \
  -D RDK_BUILD_MOLINTERCHANGE_SUPPORT=OFF \
  -D RDK_BUILD_YAEHMOP_SUPPORT=OFF \
  -D RDK_BUILD_STRUCTCHECKER_SUPPORT=OFF \
- -D RDK_TEST_MULTITHREADED=OFF \
- -D RDK_BUILD_CPP_TESTS=OFF \
  -D RDK_USE_URF=OFF \
  -D RDK_BUILD_PGSQL=ON \
  -D RDK_PGSQL_STATIC=ON \

@@ -714,10 +704,9 @@ RUN case "${PG_VERSION}" in \
  -D RDK_INSTALL_COMIC_FONTS=OFF \
  -D RDK_BUILD_FREETYPE_SUPPORT=OFF \
  -D CMAKE_BUILD_TYPE=Release \
- -GNinja \
  . && \
- ninja -j $(getconf _NPROCESSORS_ONLN) && \
+ make -j $(getconf _NPROCESSORS_ONLN) && \
- ninja -j $(getconf _NPROCESSORS_ONLN) install && \
+ make -j $(getconf _NPROCESSORS_ONLN) install && \
  echo 'trusted = true' >> /usr/local/pgsql/share/extension/rdkit.control

  #########################################################################################

@@ -860,9 +849,8 @@ FROM build-deps AS rust-extensions-build
  ARG PG_VERSION
  COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

- RUN apt update && \
+ RUN apt-get update && \
- apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
+ apt-get install --no-install-recommends -y curl libclang-dev && \
- apt clean && rm -rf /var/lib/apt/lists/* && \
  useradd -ms /bin/bash nonroot -b /home

  ENV HOME=/home/nonroot

@@ -897,9 +885,8 @@ FROM build-deps AS rust-extensions-build-pgrx12
  ARG PG_VERSION
  COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

- RUN apt update && \
+ RUN apt-get update && \
- apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
+ apt-get install --no-install-recommends -y curl libclang-dev && \
- apt clean && rm -rf /var/lib/apt/lists/* && \
  useradd -ms /bin/bash nonroot -b /home

  ENV HOME=/home/nonroot

@@ -927,22 +914,18 @@ FROM rust-extensions-build-pgrx12 AS pg-onnx-build

  # cmake 3.26 or higher is required, so installing it using pip (bullseye-backports has cmake 3.25).
  # Install it using virtual environment, because Python 3.11 (the default version on Debian 12 (Bookworm)) complains otherwise
- RUN apt update && apt install --no-install-recommends --no-install-suggests -y \
+ RUN apt-get update && apt-get install -y python3 python3-pip python3-venv && \
- python3 python3-pip python3-venv && \
- apt clean && rm -rf /var/lib/apt/lists/* && \
  python3 -m venv venv && \
  . venv/bin/activate && \
  python3 -m pip install cmake==3.30.5 && \
  wget https://github.com/microsoft/onnxruntime/archive/refs/tags/v1.18.1.tar.gz -O onnxruntime.tar.gz && \
  mkdir onnxruntime-src && cd onnxruntime-src && tar xzf ../onnxruntime.tar.gz --strip-components=1 -C . && \
- ./build.sh --config Release --parallel --cmake_generator Ninja \
+ ./build.sh --config Release --parallel --skip_submodule_sync --skip_tests --allow_running_as_root
- --skip_submodule_sync --skip_tests --allow_running_as_root


  FROM pg-onnx-build AS pgrag-pg-build

- RUN apt update && apt install --no-install-recommends --no-install-suggests -y protobuf-compiler \
+ RUN apt-get install -y protobuf-compiler && \
- && apt clean && rm -rf /var/lib/apt/lists/* && \
  wget https://github.com/neondatabase-labs/pgrag/archive/refs/tags/v0.0.0.tar.gz -O pgrag.tar.gz && \
  echo "2cbe394c1e74fc8bcad9b52d5fbbfb783aef834ca3ce44626cfd770573700bb4 pgrag.tar.gz" | sha256sum --check && \
  mkdir pgrag-src && cd pgrag-src && tar xzf ../pgrag.tar.gz --strip-components=1 -C . && \

@@ -1185,25 +1168,6 @@ RUN case "${PG_VERSION}" in \
  make BUILD_TYPE=release -j $(getconf _NPROCESSORS_ONLN) install && \
  echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_mooncake.control

- #########################################################################################
- #
- # Layer "pg_repack"
- # compile pg_repack extension
- #
- #########################################################################################
-
- FROM build-deps AS pg-repack-build
- ARG PG_VERSION
- COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
-
- ENV PATH="/usr/local/pgsql/bin/:$PATH"
-
- RUN wget https://github.com/reorg/pg_repack/archive/refs/tags/ver_1.5.2.tar.gz -O pg_repack.tar.gz && \
- echo '4516cad42251ed3ad53ff619733004db47d5755acac83f75924cd94d1c4fb681 pg_repack.tar.gz' | sha256sum --check && \
- mkdir pg_repack-src && cd pg_repack-src && tar xzf ../pg_repack.tar.gz --strip-components=1 -C . && \
- make -j $(getconf _NPROCESSORS_ONLN) && \
- make -j $(getconf _NPROCESSORS_ONLN) install
-
  #########################################################################################
  #
  # Layer "neon-pg-ext-build"

@@ -1249,7 +1213,6 @@ COPY --from=pg-anon-pg-build /usr/local/pgsql/ /usr/local/pgsql/
  COPY --from=pg-ivm-build /usr/local/pgsql/ /usr/local/pgsql/
  COPY --from=pg-partman-build /usr/local/pgsql/ /usr/local/pgsql/
  COPY --from=pg-mooncake-build /usr/local/pgsql/ /usr/local/pgsql/
- COPY --from=pg-repack-build /usr/local/pgsql/ /usr/local/pgsql/
  COPY pgxn/ pgxn/

  RUN make -j $(getconf _NPROCESSORS_ONLN) \

@@ -1285,7 +1248,7 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) \

  #########################################################################################
  #
- # Compile the Neon-specific `compute_ctl`, `fast_import`, and `local_proxy` binaries
+ # Compile and run the Neon-specific `compute_ctl` and `fast_import` binaries
  #
  #########################################################################################
  FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools

@@ -1295,7 +1258,7 @@ ENV BUILD_TAG=$BUILD_TAG
  USER nonroot
  # Copy entire project to get Cargo.* files with proper dependencies for the whole project
  COPY --chown=nonroot . .
- RUN mold -run cargo build --locked --profile release-line-debug-size-lto --bin compute_ctl --bin fast_import --bin local_proxy
+ RUN cd compute_tools && mold -run cargo build --locked --profile release-line-debug-size-lto

  #########################################################################################
  #

@@ -1316,8 +1279,8 @@ COPY --from=compute-tools /home/nonroot/target/release-line-debug-size-lto/fast_

  FROM debian:$DEBIAN_FLAVOR AS pgbouncer
  RUN set -e \
- && apt update \
+ && apt-get update \
- && apt install --no-install-suggests --no-install-recommends -y \
+ && apt-get install --no-install-recommends -y \
  build-essential \
  git \
  ca-certificates \

@@ -1325,8 +1288,7 @@ RUN set -e \
  automake \
  libevent-dev \
  libtool \
- pkg-config \
+ pkg-config
- && apt clean && rm -rf /var/lib/apt/lists/*

  # Use `dist_man_MANS=` to skip manpage generation (which requires python3/pandoc)
  ENV PGBOUNCER_TAG=pgbouncer_1_22_1

@@ -1338,6 +1300,20 @@ RUN set -e \
  && make -j $(nproc) dist_man_MANS= \
  && make install dist_man_MANS=

+ #########################################################################################
+ #
+ # Compile the Neon-specific `local_proxy` binary
+ #
+ #########################################################################################
+ FROM $REPOSITORY/$IMAGE:$TAG AS local_proxy
+ ARG BUILD_TAG
+ ENV BUILD_TAG=$BUILD_TAG
+
+ USER nonroot
+ # Copy entire project to get Cargo.* files with proper dependencies for the whole project
+ COPY --chown=nonroot . .
+ RUN mold -run cargo build --locked --profile release-line-debug-size-lto --bin local_proxy
+
  #########################################################################################
  #
  # Layers "postgres-exporter" and "sql-exporter"

@@ -1348,7 +1324,7 @@ FROM quay.io/prometheuscommunity/postgres-exporter:v0.12.1 AS postgres-exporter

  # Keep the version the same as in build-tools.Dockerfile and
  # test_runner/regress/test_compute_metrics.py.
- FROM burningalchemist/sql_exporter:0.16.0 AS sql-exporter
+ FROM burningalchemist/sql_exporter:0.13.1 AS sql-exporter

  #########################################################################################
  #

@@ -1477,7 +1453,7 @@ COPY --from=pgbouncer /usr/local/pgbouncer/bin/pgbouncer /usr/local/bin/
  COPY --chmod=0666 --chown=postgres compute/etc/pgbouncer.ini /etc/pgbouncer.ini

  # local_proxy and its config
- COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/local_proxy /usr/local/bin/local_proxy
+ COPY --from=local_proxy --chown=postgres /home/nonroot/target/release-line-debug-size-lto/local_proxy /usr/local/bin/local_proxy
  RUN mkdir -p /etc/local_proxy && chown postgres:postgres /etc/local_proxy

  # Metrics exporter binaries and configuration files

@@ -1542,30 +1518,28 @@ RUN apt update && \
  locales \
  procps \
  ca-certificates \
- curl \
- unzip \
  $VERSION_INSTALLS && \
- apt clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
+ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
  localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8

- # aws cli is used by fast_import (curl and unzip above are at this time only used for this installation step)
+ # s5cmd 2.2.2 from https://github.com/peak/s5cmd/releases/tag/v2.2.2
+ # used by fast_import
  ARG TARGETARCH
+ ADD https://github.com/peak/s5cmd/releases/download/v2.2.2/s5cmd_2.2.2_linux_$TARGETARCH.deb /tmp/s5cmd.deb
  RUN set -ex; \
+ \
+ # Determine the expected checksum based on TARGETARCH
  if [ "${TARGETARCH}" = "amd64" ]; then \
- TARGETARCH_ALT="x86_64"; \
+ CHECKSUM="392c385320cd5ffa435759a95af77c215553d967e4b1c0fffe52e4f14c29cf85"; \
- CHECKSUM="c9a9df3770a3ff9259cb469b6179e02829687a464e0824d5c32d378820b53a00"; \
  elif [ "${TARGETARCH}" = "arm64" ]; then \
- TARGETARCH_ALT="aarch64"; \
+ CHECKSUM="939bee3cf4b5604ddb00e67f8c157b91d7c7a5b553d1fbb6890fad32894b7b46"; \
- CHECKSUM="8181730be7891582b38b028112e81b4899ca817e8c616aad807c9e9d1289223a"; \
  else \
  echo "Unsupported architecture: ${TARGETARCH}"; exit 1; \
  fi; \
- curl -L "https://awscli.amazonaws.com/awscli-exe-linux-${TARGETARCH_ALT}-2.17.5.zip" -o /tmp/awscliv2.zip; \
+ \
- echo "${CHECKSUM} /tmp/awscliv2.zip" | sha256sum -c -; \
+ # Compute and validate the checksum
- unzip /tmp/awscliv2.zip -d /tmp/awscliv2; \
+ echo "${CHECKSUM} /tmp/s5cmd.deb" | sha256sum -c -
- /tmp/awscliv2/aws/install; \
+ RUN dpkg -i /tmp/s5cmd.deb && rm /tmp/s5cmd.deb
- rm -rf /tmp/awscliv2.zip /tmp/awscliv2; \
- true

  ENV LANG=en_US.utf8
  USER postgres
@@ -3,7 +3,7 @@
  metrics: [
  import 'sql_exporter/checkpoints_req.libsonnet',
  import 'sql_exporter/checkpoints_timed.libsonnet',
- import 'sql_exporter/compute_backpressure_throttling_seconds_total.libsonnet',
+ import 'sql_exporter/compute_backpressure_throttling_seconds.libsonnet',
  import 'sql_exporter/compute_current_lsn.libsonnet',
  import 'sql_exporter/compute_logical_snapshot_files.libsonnet',
  import 'sql_exporter/compute_logical_snapshots_bytes.libsonnet',
@@ -19,10 +19,3 @@ max_prepared_statements=0
  admin_users=postgres
  unix_socket_dir=/tmp/
  unix_socket_mode=0777
-
- ;; Disable connection logging. It produces a lot of logs that no one looks at,
- ;; and we can get similar log entries from the proxy too. We had incidents in
- ;; the past where the logging significantly stressed the log device or pgbouncer
- ;; itself.
- log_connections=0
- log_disconnections=0
@@ -1,10 +1,10 @@
  {
- metric_name: 'compute_backpressure_throttling_seconds_total',
+ metric_name: 'compute_backpressure_throttling_seconds',
- type: 'counter',
+ type: 'gauge',
  help: 'Time compute has spent throttled',
  key_labels: null,
  values: [
  'throttled',
  ],
- query: importstr 'sql_exporter/compute_backpressure_throttling_seconds_total.sql',
+ query: importstr 'sql_exporter/compute_backpressure_throttling_seconds.sql',
  }
@@ -981,7 +981,7 @@ index fc42d418bf..e38f517574 100644
|
|||||||
CREATE SCHEMA addr_nsp;
|
CREATE SCHEMA addr_nsp;
|
||||||
SET search_path TO 'addr_nsp';
|
SET search_path TO 'addr_nsp';
|
||||||
diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out
|
diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out
|
||||||
index 8475231735..0653946337 100644
|
index 8475231735..1afae5395f 100644
|
||||||
--- a/src/test/regress/expected/password.out
|
--- a/src/test/regress/expected/password.out
|
||||||
+++ b/src/test/regress/expected/password.out
|
+++ b/src/test/regress/expected/password.out
|
||||||
@@ -12,11 +12,11 @@ SET password_encryption = 'md5'; -- ok
|
@@ -12,11 +12,11 @@ SET password_encryption = 'md5'; -- ok
|
||||||
@@ -1006,63 +1006,65 @@ index 8475231735..0653946337 100644
 -----------------+---------------------------------------------------
 - regress_passwd1 | md5783277baca28003b33453252be4dbb34
 - regress_passwd2 | md54044304ba511dd062133eb5b4b84a2a3
-+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
++ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
-+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
++ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
 regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
 - regress_passwd4 |
 + regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
 (4 rows)

 -- Rename a role
-@@ -54,24 +54,16 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
+@@ -54,24 +54,30 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
 -- passwords.
 SET password_encryption = 'md5';
 -- encrypt with MD5
 -ALTER ROLE regress_passwd2 PASSWORD 'foo';
---- already encrypted, use as they are
--ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
--ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
 +ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- already encrypted, use as they are
+ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
+ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
 SET password_encryption = 'scram-sha-256';
 -- create SCRAM secret
 -ALTER ROLE regress_passwd4 PASSWORD 'foo';
---- already encrypted with MD5, use as it is
--CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
---- This looks like a valid SCRAM-SHA-256 secret, but it is not
---- so it should be hashed with SCRAM-SHA-256.
--CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
---- These may look like valid MD5 secrets, but they are not, so they
---- should be hashed with SCRAM-SHA-256.
---- trailing garbage at the end
--CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
---- invalid length
--CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
 +ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+-- Neon does not support encrypted passwords, use unencrypted instead
+-- already encrypted with MD5, use as it is
-+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
-+-- Neon does not support encrypted passwords, use unencrypted instead
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- This looks like a valid SCRAM-SHA-256 secret, but it is not
-+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- so it should be hashed with SCRAM-SHA-256.
-+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
+-- These may look like valid MD5 secrets, but they are not, so they
+-- should be hashed with SCRAM-SHA-256.
+-- trailing garbage at the end
+CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
+-- invalid length
+CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
 -- Changing the SCRAM iteration count
 SET scram_iterations = 1024;
 CREATE ROLE regress_passwd9 PASSWORD 'alterediterationcount';
-@@ -81,11 +73,11 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
+@@ -81,63 +87,67 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
 ORDER BY rolname, rolpassword;
 rolname | rolpassword_masked
 -----------------+---------------------------------------------------
 - regress_passwd1 | md5cd3578025fe2c3d7ed1b9a9b26238b70
 - regress_passwd2 | md5dfa155cadd5f4ad57860162f3fab9cdb
-+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
++ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
-+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
++ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
 regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
 regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
 - regress_passwd5 | md5e73a4b11df52a6068f8b39f90be36023
-+ regress_passwd5 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+- regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
-regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+- regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
-regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+- regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
-regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+regress_passwd9 | SCRAM-SHA-256$1024:<salt>$<storedkey>:<serverkey>
-@@ -95,23 +87,20 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
+-(9 rows)
++(5 rows)

 -- An empty password is not allowed, in any form
 CREATE ROLE regress_passwd_empty PASSWORD '';
 NOTICE: empty string is not a valid password, clearing password
@@ -1080,37 +1082,56 @@ index 8475231735..0653946337 100644
 -(1 row)
 +(0 rows)

---- Test with invalid stored and server keys.
+-- Test with invalid stored and server keys.
----
+--
---- The first is valid, to act as a control. The others have too long
+-- The first is valid, to act as a control. The others have too long
---- stored/server keys. They will be re-hashed.
+-- stored/server keys. They will be re-hashed.
--CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
+CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
--CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
--CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
+CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-+-- Neon does not support encrypted passwords, use unencrypted instead
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
-+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
 -- Check that the invalid secrets were re-hashed. A re-hashed secret
 -- should not contain the original salt.
 SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed
-@@ -120,7 +109,7 @@ SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassw
+FROM pg_authid
+WHERE rolname LIKE 'regress_passwd_sha_len%'
 ORDER BY rolname;
-rolname | is_rolpassword_rehashed
+- rolname | is_rolpassword_rehashed
--------------------------+-------------------------
+--------------------------+-------------------------
 - regress_passwd_sha_len0 | f
-+ regress_passwd_sha_len0 | t
+- regress_passwd_sha_len1 | t
-regress_passwd_sha_len1 | t
+- regress_passwd_sha_len2 | t
-regress_passwd_sha_len2 | t
+-(3 rows)
-(3 rows)
++ rolname | is_rolpassword_rehashed
-@@ -135,6 +124,7 @@ DROP ROLE regress_passwd7;
++---------+-------------------------
++(0 rows)

+DROP ROLE regress_passwd1;
+DROP ROLE regress_passwd2;
+DROP ROLE regress_passwd3;
+DROP ROLE regress_passwd4;
+DROP ROLE regress_passwd5;
++ERROR: role "regress_passwd5" does not exist
+DROP ROLE regress_passwd6;
++ERROR: role "regress_passwd6" does not exist
+DROP ROLE regress_passwd7;
++ERROR: role "regress_passwd7" does not exist
 DROP ROLE regress_passwd8;
++ERROR: role "regress_passwd8" does not exist
 DROP ROLE regress_passwd9;
 DROP ROLE regress_passwd_empty;
 +ERROR: role "regress_passwd_empty" does not exist
 DROP ROLE regress_passwd_sha_len0;
++ERROR: role "regress_passwd_sha_len0" does not exist
 DROP ROLE regress_passwd_sha_len1;
++ERROR: role "regress_passwd_sha_len1" does not exist
 DROP ROLE regress_passwd_sha_len2;
++ERROR: role "regress_passwd_sha_len2" does not exist
+-- all entries should have been removed
+SELECT rolname, rolpassword
+FROM pg_authid
 diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
 index 5b9dba7b32..cc408dad42 100644
 --- a/src/test/regress/expected/privileges.out
@@ -3173,7 +3194,7 @@ index 1a6c61f49d..1c31ac6a53 100644
 -- Test generic object addressing/identification functions
 CREATE SCHEMA addr_nsp;
 diff --git a/src/test/regress/sql/password.sql b/src/test/regress/sql/password.sql
-index 53e86b0b6c..0303fdfe96 100644
+index 53e86b0b6c..f07cf1ec54 100644
 --- a/src/test/regress/sql/password.sql
 +++ b/src/test/regress/sql/password.sql
 @@ -10,11 +10,11 @@ SET password_encryption = 'scram-sha-256'; -- ok
@@ -3192,59 +3213,23 @@ index 53e86b0b6c..0303fdfe96 100644

 -- check list of created entries
 --
-@@ -42,26 +42,18 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
+@@ -42,14 +42,14 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
 SET password_encryption = 'md5';

 -- encrypt with MD5
 -ALTER ROLE regress_passwd2 PASSWORD 'foo';
---- already encrypted, use as they are
--ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
--ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
 +ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- already encrypted, use as they are
+ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
+ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';

 SET password_encryption = 'scram-sha-256';
 -- create SCRAM secret
 -ALTER ROLE regress_passwd4 PASSWORD 'foo';
---- already encrypted with MD5, use as it is
--CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
 +ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+-- Neon does not support encrypted passwords, use unencrypted instead
+-- already encrypted with MD5, use as it is
-+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';

---- This looks like a valid SCRAM-SHA-256 secret, but it is not
---- so it should be hashed with SCRAM-SHA-256.
--CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
---- These may look like valid MD5 secrets, but they are not, so they
---- should be hashed with SCRAM-SHA-256.
---- trailing garbage at the end
--CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
---- invalid length
--CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
-+-- Neon does not support encrypted passwords, use unencrypted instead
-+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;

--- Changing the SCRAM iteration count
-SET scram_iterations = 1024;
-@@ -78,13 +70,10 @@ ALTER ROLE regress_passwd_empty PASSWORD 'md585939a5ce845f1a1b620742e3c659e0a';
-ALTER ROLE regress_passwd_empty PASSWORD 'SCRAM-SHA-256$4096:hpFyHTUsSWcR7O9P$LgZFIt6Oqdo27ZFKbZ2nV+vtnYM995pDh9ca6WSi120=:qVV5NeluNfUPkwm7Vqat25RjSPLkGeoZBQs6wVv+um4=';
-SELECT rolpassword FROM pg_authid WHERE rolname='regress_passwd_empty';

---- Test with invalid stored and server keys.
----
---- The first is valid, to act as a control. The others have too long
---- stored/server keys. They will be re-hashed.
--CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
--CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
--CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
-+-- Neon does not support encrypted passwords, use unencrypted instead
-+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;

--- Check that the invalid secrets were re-hashed. A re-hashed secret
--- should not contain the original salt.
 diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
 index 249df17a58..b258e7f26a 100644
 --- a/src/test/regress/sql/privileges.sql

File diff suppressed because it is too large
@@ -7,7 +7,7 @@ license.workspace = true
 [features]
 default = []
 # Enables test specific features.
-testing = ["fail/failpoints"]
+testing = []

 [dependencies]
 base64.workspace = true
@@ -19,7 +19,6 @@ camino.workspace = true
 chrono.workspace = true
 cfg-if.workspace = true
 clap.workspace = true
-fail.workspace = true
 flate2.workspace = true
 futures.workspace = true
 hyper0 = { workspace = true, features = ["full"] }
@@ -67,15 +67,12 @@ use compute_tools::params::*;
 use compute_tools::spec::*;
 use compute_tools::swap::resize_swap;
 use rlimit::{setrlimit, Resource};
-use utils::failpoint_support;

 // this is an arbitrary build tag. Fine as a default / for testing purposes
 // in-case of not-set environment var
 const BUILD_TAG_DEFAULT: &str = "latest";

 fn main() -> Result<()> {
-let scenario = failpoint_support::init();

 let (build_tag, clap_args) = init()?;

 // enable core dumping for all child processes
@@ -103,8 +100,6 @@ fn main() -> Result<()> {

 maybe_delay_exit(delay_exit);

-scenario.teardown();

 deinit_and_exit(wait_pg_result);
 }

@@ -251,48 +246,47 @@ fn try_spec_from_cli(
 let compute_id = matches.get_one::<String>("compute-id");
 let control_plane_uri = matches.get_one::<String>("control-plane-uri");

-// First, try to get cluster spec from the cli argument
+let spec;
-if let Some(spec_json) = spec_json {
+let mut live_config_allowed = false;
-info!("got spec from cli argument {}", spec_json);
+match spec_json {
-return Ok(CliSpecParams {
+// First, try to get cluster spec from the cli argument
-spec: Some(serde_json::from_str(spec_json)?),
+Some(json) => {
-live_config_allowed: false,
+info!("got spec from cli argument {}", json);
-});
+spec = Some(serde_json::from_str(json)?);
-}

-// Second, try to read it from the file if path is provided
-if let Some(spec_path) = spec_path {
-let file = File::open(Path::new(spec_path))?;
-return Ok(CliSpecParams {
-spec: Some(serde_json::from_reader(file)?),
-live_config_allowed: true,
-});
-}

-let Some(compute_id) = compute_id else {
-panic!(
-"compute spec should be provided by one of the following ways: \
---spec OR --spec-path OR --control-plane-uri and --compute-id"
-);
-};
-let Some(control_plane_uri) = control_plane_uri else {
-panic!("must specify both --control-plane-uri and --compute-id or none");
-};

-match get_spec_from_control_plane(control_plane_uri, compute_id) {
-Ok(spec) => Ok(CliSpecParams {
-spec,
-live_config_allowed: true,
-}),
-Err(e) => {
-error!(
-"cannot get response from control plane: {}\n\
-neither spec nor confirmation that compute is in the Empty state was received",
-e
-);
-Err(e)
 }
-}
+None => {
+// Second, try to read it from the file if path is provided
+if let Some(sp) = spec_path {
+let path = Path::new(sp);
+let file = File::open(path)?;
+spec = Some(serde_json::from_reader(file)?);
+live_config_allowed = true;
+} else if let Some(id) = compute_id {
+if let Some(cp_base) = control_plane_uri {
+live_config_allowed = true;
+spec = match get_spec_from_control_plane(cp_base, id) {
+Ok(s) => s,
+Err(e) => {
+error!("cannot get response from control plane: {}", e);
+panic!("neither spec nor confirmation that compute is in the Empty state was received");
+}
+};
+} else {
+panic!("must specify both --control-plane-uri and --compute-id or none");
+}
+} else {
+panic!(
+"compute spec should be provided by one of the following ways: \
+--spec OR --spec-path OR --control-plane-uri and --compute-id"
+);
+}
+}
+};

+Ok(CliSpecParams {
+spec,
+live_config_allowed,
+})
 }

 struct CliSpecParams {
@@ -424,13 +418,9 @@ fn start_postgres(
 "running compute with features: {:?}",
 state.pspec.as_ref().unwrap().spec.features
 );
-// before we release the mutex, fetch some parameters for later.
+// before we release the mutex, fetch the swap size (if any) for later.
-let &ComputeSpec {
+let swap_size_bytes = state.pspec.as_ref().unwrap().spec.swap_size_bytes;
-swap_size_bytes,
+let disk_quota_bytes = state.pspec.as_ref().unwrap().spec.disk_quota_bytes;
-disk_quota_bytes,
-disable_lfc_resizing,
-..
-} = &state.pspec.as_ref().unwrap().spec;
 drop(state);

 // Launch remaining service threads
@@ -535,18 +525,11 @@ fn start_postgres(
 // This token is used internally by the monitor to clean up all threads
 let token = CancellationToken::new();

-// don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
-let pgconnstr = if disable_lfc_resizing.unwrap_or(false) {
-None
-} else {
-file_cache_connstr.cloned()
-};

 let vm_monitor = rt.as_ref().map(|rt| {
 rt.spawn(vm_monitor::start(
 Box::leak(Box::new(vm_monitor::Args {
 cgroup: cgroup.cloned(),
-pgconnstr,
+pgconnstr: file_cache_connstr.cloned(),
 addr: vm_monitor_addr.clone(),
 })),
 token.clone(),
@@ -34,12 +34,12 @@ use nix::unistd::Pid;
 use tracing::{info, info_span, warn, Instrument};
 use utils::fs_ext::is_directory_empty;

-#[path = "fast_import/aws_s3_sync.rs"]
-mod aws_s3_sync;
 #[path = "fast_import/child_stdio_to_log.rs"]
 mod child_stdio_to_log;
 #[path = "fast_import/s3_uri.rs"]
 mod s3_uri;
+#[path = "fast_import/s5cmd.rs"]
+mod s5cmd;

 #[derive(clap::Parser)]
 struct Args {
@@ -326,7 +326,7 @@ pub(crate) async fn main() -> anyhow::Result<()> {
 }

 info!("upload pgdata");
-aws_s3_sync::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/pgdata/"))
+s5cmd::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/"))
 .await
 .context("sync dump directory to destination")?;

@@ -334,10 +334,10 @@ pub(crate) async fn main() -> anyhow::Result<()> {
 {
 let status_dir = working_directory.join("status");
 std::fs::create_dir(&status_dir).context("create status directory")?;
-let status_file = status_dir.join("pgdata");
+let status_file = status_dir.join("status");
 std::fs::write(&status_file, serde_json::json!({"done": true}).to_string())
 .context("write status file")?;
-aws_s3_sync::sync(&status_dir, &s3_prefix.append("/status/"))
+s5cmd::sync(&status_file, &s3_prefix.append("/status/pgdata"))
 .await
 .context("sync status directory to destination")?;
 }
@@ -4,21 +4,24 @@ use camino::Utf8Path;
 use super::s3_uri::S3Uri;

 pub(crate) async fn sync(local: &Utf8Path, remote: &S3Uri) -> anyhow::Result<()> {
-let mut builder = tokio::process::Command::new("aws");
+let mut builder = tokio::process::Command::new("s5cmd");
+// s5cmd uses aws-sdk-go v1, hence doesn't support AWS_ENDPOINT_URL
+if let Some(val) = std::env::var_os("AWS_ENDPOINT_URL") {
+builder.arg("--endpoint-url").arg(val);
+}
 builder
-.arg("s3")
 .arg("sync")
 .arg(local.as_str())
 .arg(remote.to_string());
 let st = builder
 .spawn()
-.context("spawn aws s3 sync")?
+.context("spawn s5cmd")?
 .wait()
 .await
-.context("wait for aws s3 sync")?;
+.context("wait for s5cmd")?;
 if st.success() {
 Ok(())
 } else {
-Err(anyhow::anyhow!("aws s3 sync failed"))
+Err(anyhow::anyhow!("s5cmd failed"))
 }
 }
@@ -1181,19 +1181,8 @@ impl ComputeNode {
 let mut conf = postgres::config::Config::from(conf);
 conf.application_name("compute_ctl:migrations");

-match conf.connect(NoTls) {
+let mut client = conf.connect(NoTls)?;
-Ok(mut client) => {
+handle_migrations(&mut client).context("apply_config handle_migrations")
-if let Err(e) = handle_migrations(&mut client) {
-error!("Failed to run migrations: {}", e);
-}
-}
-Err(e) => {
-error!(
-"Failed to connect to the compute for running migrations: {}",
-e
-);
-}
-};
 });

 Ok::<(), anyhow::Error>(())
@@ -24,11 +24,8 @@ use metrics::proto::MetricFamily;
 use metrics::Encoder;
 use metrics::TextEncoder;
 use tokio::task;
-use tokio_util::sync::CancellationToken;
 use tracing::{debug, error, info, warn};
 use tracing_utils::http::OtelName;
-use utils::failpoint_support::failpoints_handler;
-use utils::http::error::ApiError;
 use utils::http::request::must_get_query_param;

 fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse {
@@ -313,18 +310,6 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
 }
 }

-(&Method::POST, "/failpoints") if cfg!(feature = "testing") => {
-match failpoints_handler(req, CancellationToken::new()).await {
-Ok(r) => r,
-Err(ApiError::BadRequest(e)) => {
-render_json_error(&e.to_string(), StatusCode::BAD_REQUEST)
-}
-Err(_) => {
-render_json_error("Internal server error", StatusCode::INTERNAL_SERVER_ERROR)
-}
-}
-}

 // download extension files from remote extension storage on demand
 (&Method::POST, route) if route.starts_with("/extension_server/") => {
 info!("serving {:?} POST request", route);
@@ -537,14 +537,12 @@ components:
 properties:
 extname:
 type: string
-version:
+versions:
-type: string
+type: array
 items:
 type: string
 n_databases:
 type: integer
-owned_by_superuser:
-type: integer

 SetRoleGrantsRequest:
 type: object
@@ -1,6 +1,7 @@
 use compute_api::responses::{InstalledExtension, InstalledExtensions};
 use metrics::proto::MetricFamily;
 use std::collections::HashMap;
+use std::collections::HashSet;

 use anyhow::Result;
 use postgres::{Client, NoTls};
@@ -37,77 +38,61 @@ fn list_dbs(client: &mut Client) -> Result<Vec<String>> {
 /// Connect to every database (see list_dbs above) and get the list of installed extensions.
 ///
 /// Same extension can be installed in multiple databases with different versions,
-/// so we report a separate metric (number of databases where it is installed)
+/// we only keep the highest and lowest version across all databases.
-/// for each extension version.
 pub fn get_installed_extensions(mut conf: postgres::config::Config) -> Result<InstalledExtensions> {
 conf.application_name("compute_ctl:get_installed_extensions");
 let mut client = conf.connect(NoTls)?;

 let databases: Vec<String> = list_dbs(&mut client)?;

-let mut extensions_map: HashMap<(String, String, String), InstalledExtension> = HashMap::new();
+let mut extensions_map: HashMap<String, InstalledExtension> = HashMap::new();
 for db in databases.iter() {
 conf.dbname(db);
 let mut db_client = conf.connect(NoTls)?;
-let extensions: Vec<(String, String, i32)> = db_client
+let extensions: Vec<(String, String)> = db_client
 .query(
-"SELECT extname, extversion, extowner::integer FROM pg_catalog.pg_extension",
+"SELECT extname, extversion FROM pg_catalog.pg_extension;",
 &[],
 )?
 .iter()
-.map(|row| {
+.map(|row| (row.get("extname"), row.get("extversion")))
-(
-row.get("extname"),
-row.get("extversion"),
-row.get("extowner"),
-)
-})
 .collect();

-for (extname, v, extowner) in extensions.iter() {
+for (extname, v) in extensions.iter() {
 let version = v.to_string();

-// check if the extension is owned by superuser
+// increment the number of databases where the version of extension is installed
-// 10 is the oid of superuser
+INSTALLED_EXTENSIONS
-let owned_by_superuser = if *extowner == 10 { "1" } else { "0" };
+.with_label_values(&[extname, &version])
+.inc();

 extensions_map
-.entry((
+.entry(extname.to_string())
-extname.to_string(),
-version.clone(),
-owned_by_superuser.to_string(),
-))
 .and_modify(|e| {
+e.versions.insert(version.clone());
 // count the number of databases where the extension is installed
 e.n_databases += 1;
 })
 .or_insert(InstalledExtension {
 extname: extname.to_string(),
-version: version.clone(),
+versions: HashSet::from([version.clone()]),
 n_databases: 1,
-owned_by_superuser: owned_by_superuser.to_string(),
 });
 }
 }

-for (key, ext) in extensions_map.iter() {
+let res = InstalledExtensions {
-let (extname, version, owned_by_superuser) = key;
-let n_databases = ext.n_databases as u64;

-INSTALLED_EXTENSIONS
-.with_label_values(&[extname, version, owned_by_superuser])
-.set(n_databases);
-}

-Ok(InstalledExtensions {
 extensions: extensions_map.into_values().collect(),
-})
+};

+Ok(res)
 }

 static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
 register_uint_gauge_vec!(
 "compute_installed_extensions",
 "Number of databases where the version of extension is installed",
-&["extension_name", "version", "owned_by_superuser"]
+&["extension_name", "version"]
 )
 .expect("failed to define a metric")
 });
@@ -1,16 +1,13 @@
 use anyhow::{Context, Result};
-use fail::fail_point;
 use postgres::Client;
 use tracing::info;

-/// Runs a series of migrations on a target database
 pub(crate) struct MigrationRunner<'m> {
 client: &'m mut Client,
 migrations: &'m [&'m str],
 }

 impl<'m> MigrationRunner<'m> {
-/// Create a new migration runner
 pub fn new(client: &'m mut Client, migrations: &'m [&'m str]) -> Self {
 // The neon_migration.migration_id::id column is a bigint, which is equivalent to an i64
 assert!(migrations.len() + 1 < i64::MAX as usize);
@@ -18,7 +15,6 @@ impl<'m> MigrationRunner<'m> {
 Self { client, migrations }
 }

-/// Get the current value neon_migration.migration_id
 fn get_migration_id(&mut self) -> Result<i64> {
 let query = "SELECT id FROM neon_migration.migration_id";
 let row = self
@@ -29,61 +25,37 @@ impl<'m> MigrationRunner<'m> {
 Ok(row.get::<&str, i64>("id"))
 }

-/// Update the neon_migration.migration_id value
-///
-/// This function has a fail point called compute-migration, which can be
-/// used if you would like to fail the application of a series of migrations
-/// at some point.
 fn update_migration_id(&mut self, migration_id: i64) -> Result<()> {
-// We use this fail point in order to check that failing in the
+let setval = format!("UPDATE neon_migration.migration_id SET id={}", migration_id);
-// middle of applying a series of migrations fails in an expected
-// manner
-if cfg!(feature = "testing") {
-let fail = (|| {
-fail_point!("compute-migration", |fail_migration_id| {
-migration_id == fail_migration_id.unwrap().parse::<i64>().unwrap()
-});

-false
-})();

-if fail {
-return Err(anyhow::anyhow!(format!(
-"migration {} was configured to fail because of a failpoint",
-migration_id
-)));
-}
-}

 self.client
-.query(
+.simple_query(&setval)
-"UPDATE neon_migration.migration_id SET id = $1",
-&[&migration_id],
-)
 .context("run_migrations update id")?;

 Ok(())
 }

-/// Prepare the migrations the target database for handling migrations
+fn prepare_migrations(&mut self) -> Result<()> {
-fn prepare_database(&mut self) -> Result<()> {
+let query = "CREATE SCHEMA IF NOT EXISTS neon_migration";
-self.client
+self.client.simple_query(query)?;
-.simple_query("CREATE SCHEMA IF NOT EXISTS neon_migration")?;
-self.client.simple_query("CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)")?;
+let query = "CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)";
-self.client.simple_query(
+self.client.simple_query(query)?;
-"INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING",
-)?;
+let query = "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING";
-self.client
+self.client.simple_query(query)?;
-.simple_query("ALTER SCHEMA neon_migration OWNER TO cloud_admin")?;
-self.client
+let query = "ALTER SCHEMA neon_migration OWNER TO cloud_admin";
-.simple_query("REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC")?;
+self.client.simple_query(query)?;

+let query = "REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC";
+self.client.simple_query(query)?;

 Ok(())
 }

-/// Run the configrured set of migrations
 pub fn run_migrations(mut self) -> Result<()> {
-self.prepare_database()?;
+self.prepare_migrations()?;

 let mut current_migration = self.get_migration_id()? as usize;
 while current_migration < self.migrations.len() {
@@ -97,11 +69,6 @@ impl<'m> MigrationRunner<'m> {

 if migration.starts_with("-- SKIP") {
 info!("Skipping migration id={}", migration_id!(current_migration));

-// Even though we are skipping the migration, updating the
-// migration ID should help keep logic easy to understand when
-// trying to understand the state of a cluster.
-self.update_migration_id(migration_id!(current_migration))?;
 } else {
 info!(
 "Running migration id={}:\n{}\n",
@@ -120,6 +87,7 @@ impl<'m> MigrationRunner<'m> {
 )
 })?;

+// Migration IDs start at 1
 self.update_migration_id(migration_id!(current_migration))?;

 self.client
@@ -1,9 +0,0 @@
-DO $$
-DECLARE
-bypassrls boolean;
-BEGIN
-SELECT rolbypassrls INTO bypassrls FROM pg_roles WHERE rolname = 'neon_superuser';
-IF NOT bypassrls THEN
-RAISE EXCEPTION 'neon_superuser cannot bypass RLS';
-END IF;
-END $$;
@@ -1,25 +0,0 @@
-DO $$
-DECLARE
-role record;
-BEGIN
-FOR role IN
-SELECT rolname AS name, rolinherit AS inherit
-FROM pg_roles
-WHERE pg_has_role(rolname, 'neon_superuser', 'member')
-LOOP
-IF NOT role.inherit THEN
-RAISE EXCEPTION '% cannot inherit', quote_ident(role.name);
-END IF;
-END LOOP;
-
-FOR role IN
-SELECT rolname AS name, rolbypassrls AS bypassrls
-FROM pg_roles
-WHERE NOT pg_has_role(rolname, 'neon_superuser', 'member')
-AND NOT starts_with(rolname, 'pg_')
-LOOP
-IF role.bypassrls THEN
-RAISE EXCEPTION '% can bypass RLS', quote_ident(role.name);
-END IF;
-END LOOP;
-END $$;
@@ -1,10 +0,0 @@
-DO $$
-BEGIN
-IF (SELECT current_setting('server_version_num')::numeric < 160000) THEN
-RETURN;
-END IF;
-
-IF NOT (SELECT pg_has_role('neon_superuser', 'pg_create_subscription', 'member')) THEN
-RAISE EXCEPTION 'neon_superuser cannot execute pg_create_subscription';
-END IF;
-END $$;
@@ -1,19 +0,0 @@
-DO $$
-DECLARE
-monitor record;
-BEGIN
-SELECT pg_has_role('neon_superuser', 'pg_monitor', 'member') AS member,
-admin_option AS admin
-INTO monitor
-FROM pg_auth_members
-WHERE roleid = 'pg_monitor'::regrole
-AND member = 'pg_monitor'::regrole;
-
-IF NOT monitor.member THEN
-RAISE EXCEPTION 'neon_superuser is not a member of pg_monitor';
-END IF;
-
-IF NOT monitor.admin THEN
-RAISE EXCEPTION 'neon_superuser cannot grant pg_monitor';
-END IF;
-END $$;
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,13 +0,0 @@
-DO $$
-DECLARE
-can_execute boolean;
-BEGIN
-SELECT bool_and(has_function_privilege('neon_superuser', oid, 'execute'))
-INTO can_execute
-FROM pg_proc
-WHERE proname IN ('pg_export_snapshot', 'pg_log_standby_snapshot')
-AND pronamespace = 'pg_catalog'::regnamespace;
-IF NOT can_execute THEN
-RAISE EXCEPTION 'neon_superuser cannot execute both pg_export_snapshot and pg_log_standby_snapshot';
-END IF;
-END $$;
@@ -1,13 +0,0 @@
-DO $$
-DECLARE
-can_execute boolean;
-BEGIN
-SELECT has_function_privilege('neon_superuser', oid, 'execute')
-INTO can_execute
-FROM pg_proc
-WHERE proname = 'pg_show_replication_origin_status'
-AND pronamespace = 'pg_catalog'::regnamespace;
-IF NOT can_execute THEN
-RAISE EXCEPTION 'neon_superuser cannot execute pg_show_replication_origin_status';
-END IF;
-END $$;
@@ -274,7 +274,6 @@ fn fill_remote_storage_secrets_vars(mut cmd: &mut Command) -> &mut Command {
 for env_key in [
 "AWS_ACCESS_KEY_ID",
 "AWS_SECRET_ACCESS_KEY",
-"AWS_SESSION_TOKEN",
 "AWS_PROFILE",
 // HOME is needed in combination with `AWS_PROFILE` to pick up the SSO sessions.
 "HOME",
@@ -19,7 +19,6 @@ use control_plane::storage_controller::{
 NeonStorageControllerStartArgs, NeonStorageControllerStopArgs, StorageController,
 };
 use control_plane::{broker, local_env};
-use nix::fcntl::{flock, FlockArg};
 use pageserver_api::config::{
 DEFAULT_HTTP_LISTEN_PORT as DEFAULT_PAGESERVER_HTTP_PORT,
 DEFAULT_PG_LISTEN_PORT as DEFAULT_PAGESERVER_PG_PORT,
@@ -37,8 +36,6 @@ use safekeeper_api::{
 };
 use std::borrow::Cow;
 use std::collections::{BTreeSet, HashMap};
-use std::fs::File;
-use std::os::fd::AsRawFd;
 use std::path::PathBuf;
 use std::process::exit;
 use std::str::FromStr;
@@ -692,21 +689,6 @@ struct TimelineTreeEl {
 pub children: BTreeSet<TimelineId>,
 }

-/// A flock-based guard over the neon_local repository directory
-struct RepoLock {
-_file: File,
-}
-
-impl RepoLock {
-fn new() -> Result<Self> {
-let repo_dir = File::open(local_env::base_path())?;
-let repo_dir_fd = repo_dir.as_raw_fd();
-flock(repo_dir_fd, FlockArg::LockExclusive)?;
-
-Ok(Self { _file: repo_dir })
-}
-}

 // Main entry point for the 'neon_local' CLI utility
 //
 // This utility helps to manage neon installation. That includes following:
@@ -718,14 +700,9 @@ fn main() -> Result<()> {
 let cli = Cli::parse();

 // Check for 'neon init' command first.
-let (subcommand_result, _lock) = if let NeonLocalCmd::Init(args) = cli.command {
+let subcommand_result = if let NeonLocalCmd::Init(args) = cli.command {
-(handle_init(&args).map(|env| Some(Cow::Owned(env))), None)
+handle_init(&args).map(|env| Some(Cow::Owned(env)))
 } else {
-// This tool uses a collection of simple files to store its state, and consequently
-// it is not generally safe to run multiple commands concurrently. Rather than expect
-// all callers to know this, use a lock file to protect against concurrent execution.
-let _repo_lock = RepoLock::new().unwrap();

 // all other commands need an existing config
 let env = LocalEnv::load_config(&local_env::base_path()).context("Error loading config")?;
 let original_env = env.clone();
@@ -751,12 +728,11 @@ fn main() -> Result<()> {
 NeonLocalCmd::Mappings(subcmd) => handle_mappings(&subcmd, env),
 };

-let subcommand_result = if &original_env != env {
+if &original_env != env {
 subcommand_result.map(|()| Some(Cow::Borrowed(env)))
 } else {
 subcommand_result.map(|()| None)
-};
+}
-(subcommand_result, Some(_repo_lock))
 };

 match subcommand_result {
@@ -946,7 +922,7 @@ fn handle_init(args: &InitCmdArgs) -> anyhow::Result<LocalEnv> {
 } else {
 // User (likely interactive) did not provide a description of the environment, give them the default
 NeonLocalInitConf {
-control_plane_api: Some(DEFAULT_PAGESERVER_CONTROL_PLANE_API.parse().unwrap()),
+control_plane_api: Some(Some(DEFAULT_PAGESERVER_CONTROL_PLANE_API.parse().unwrap())),
 broker: NeonBroker {
 listen_addr: DEFAULT_BROKER_ADDR.parse().unwrap(),
 },
@@ -1742,15 +1718,18 @@ async fn handle_start_all_impl(
 broker::start_broker_process(env, &retry_timeout).await
 });

-js.spawn(async move {
+// Only start the storage controller if the pageserver is configured to need it
-let storage_controller = StorageController::from_env(env);
+if env.control_plane_api.is_some() {
-storage_controller
+js.spawn(async move {
-.start(NeonStorageControllerStartArgs::with_default_instance_id(
+let storage_controller = StorageController::from_env(env);
-retry_timeout,
+storage_controller
-))
+.start(NeonStorageControllerStartArgs::with_default_instance_id(
-.await
+retry_timeout,
-.map_err(|e| e.context("start storage_controller"))
+))
-});
+.await
+.map_err(|e| e.context("start storage_controller"))
+});
+}

 for ps_conf in &env.pageservers {
 js.spawn(async move {
@@ -1795,6 +1774,10 @@ async fn neon_start_status_check(
 const RETRY_INTERVAL: Duration = Duration::from_millis(100);
 const NOTICE_AFTER_RETRIES: Duration = Duration::from_secs(5);

+if env.control_plane_api.is_none() {
+return Ok(());
+}
+
 let storcon = StorageController::from_env(env);

 let retries = retry_timeout.as_millis() / RETRY_INTERVAL.as_millis();
@@ -316,10 +316,6 @@ impl Endpoint {
 // and can cause errors like 'no unpinned buffers available', see
 // <https://github.com/neondatabase/neon/issues/9956>
 conf.append("shared_buffers", "1MB");
-// Postgres defaults to effective_io_concurrency=1, which does not exercise the pageserver's
-// batching logic. Set this to 2 so that we exercise the code a bit without letting
-// individual tests do a lot of concurrent work on underpowered test machines
-conf.append("effective_io_concurrency", "2");
 conf.append("fsync", "off");
 conf.append("max_connections", "100");
 conf.append("wal_level", "logical");
@@ -585,7 +581,6 @@ impl Endpoint {
 features: self.features.clone(),
 swap_size_bytes: None,
 disk_quota_bytes: None,
-disable_lfc_resizing: None,
 cluster: Cluster {
 cluster_id: None, // project ID: not used
 name: None, // project name: not used
@@ -815,7 +810,7 @@ impl Endpoint {
 }
 
 let client = reqwest::Client::builder()
-.timeout(Duration::from_secs(120))
+.timeout(Duration::from_secs(30))
 .build()
 .unwrap();
 let response = client
@@ -76,7 +76,7 @@ pub struct LocalEnv {
 
 // Control plane upcall API for pageserver: if None, we will not run storage_controller If set, this will
 // be propagated into each pageserver's configuration.
-pub control_plane_api: Url,
+pub control_plane_api: Option<Url>,
 
 // Control plane upcall API for storage controller. If set, this will be propagated into the
 // storage controller's configuration.
@@ -133,7 +133,7 @@ pub struct NeonLocalInitConf {
 pub storage_controller: Option<NeonStorageControllerConf>,
 pub pageservers: Vec<NeonLocalInitPageserverConf>,
 pub safekeepers: Vec<SafekeeperConf>,
-pub control_plane_api: Option<Url>,
+pub control_plane_api: Option<Option<Url>>,
 pub control_plane_compute_hook_api: Option<Option<Url>>,
 }
 
@@ -180,7 +180,7 @@ impl NeonStorageControllerConf {
 const DEFAULT_MAX_WARMING_UP_INTERVAL: std::time::Duration = std::time::Duration::from_secs(30);
 
 // Very tight heartbeat interval to speed up tests
-const DEFAULT_HEARTBEAT_INTERVAL: std::time::Duration = std::time::Duration::from_millis(1000);
+const DEFAULT_HEARTBEAT_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
 }
 
 impl Default for NeonStorageControllerConf {
@@ -535,7 +535,7 @@ impl LocalEnv {
 storage_controller,
 pageservers,
 safekeepers,
-control_plane_api: control_plane_api.unwrap(),
+control_plane_api,
 control_plane_compute_hook_api,
 branch_name_mappings,
 }
@@ -638,7 +638,7 @@ impl LocalEnv {
 storage_controller: self.storage_controller.clone(),
 pageservers: vec![], // it's skip_serializing anyway
 safekeepers: self.safekeepers.clone(),
-control_plane_api: Some(self.control_plane_api.clone()),
+control_plane_api: self.control_plane_api.clone(),
 control_plane_compute_hook_api: self.control_plane_compute_hook_api.clone(),
 branch_name_mappings: self.branch_name_mappings.clone(),
 },
@@ -768,7 +768,7 @@ impl LocalEnv {
 storage_controller: storage_controller.unwrap_or_default(),
 pageservers: pageservers.iter().map(Into::into).collect(),
 safekeepers,
-control_plane_api: control_plane_api.unwrap(),
+control_plane_api: control_plane_api.unwrap_or_default(),
 control_plane_compute_hook_api: control_plane_compute_hook_api.unwrap_or_default(),
 branch_name_mappings: Default::default(),
 };
@@ -95,19 +95,21 @@ impl PageServerNode {
 
 let mut overrides = vec![pg_distrib_dir_param, broker_endpoint_param];
 
-overrides.push(format!(
-"control_plane_api='{}'",
-self.env.control_plane_api.as_str()
-));
+if let Some(control_plane_api) = &self.env.control_plane_api {
+overrides.push(format!(
+"control_plane_api='{}'",
+control_plane_api.as_str()
+));
 
 // Storage controller uses the same auth as pageserver: if JWT is enabled
 // for us, we will also need it to talk to them.
 if matches!(conf.http_auth_type, AuthType::NeonJWT) {
 let jwt_token = self
 .env
 .generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
 .unwrap();
 overrides.push(format!("control_plane_api_token='{}'", jwt_token));
+}
 }
 
 if !conf.other.contains_key("remote_storage") {
@@ -433,7 +435,7 @@ impl PageServerNode {
 ) -> anyhow::Result<()> {
 let config = Self::parse_config(settings)?;
 self.http_client
-.set_tenant_config(&models::TenantConfigRequest { tenant_id, config })
+.tenant_config(&models::TenantConfigRequest { tenant_id, config })
 .await?;
 
 Ok(())
@@ -338,7 +338,7 @@ impl StorageController {
 .port(),
 )
 } else {
-let listen_url = self.env.control_plane_api.clone();
+let listen_url = self.env.control_plane_api.clone().unwrap();
 
 let listen = format!(
 "{}:{}",
@@ -708,7 +708,7 @@ impl StorageController {
 } else {
 // The configured URL has the /upcall path prefix for pageservers to use: we will strip that out
 // for general purpose API access.
-let listen_url = self.env.control_plane_api.clone();
+let listen_url = self.env.control_plane_api.clone().unwrap();
 Url::from_str(&format!(
 "http://{}:{}/{path}",
 listen_url.host_str().unwrap(),
@@ -5,13 +5,12 @@ use clap::{Parser, Subcommand};
 use pageserver_api::{
 controller_api::{
 AvailabilityZone, NodeAvailabilityWrapper, NodeDescribeResponse, NodeShardResponse,
-SafekeeperDescribeResponse, ShardSchedulingPolicy, TenantCreateRequest,
-TenantDescribeResponse, TenantPolicyRequest,
+ShardSchedulingPolicy, TenantCreateRequest, TenantDescribeResponse, TenantPolicyRequest,
 },
 models::{
 EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary,
-ShardParameters, TenantConfig, TenantConfigPatchRequest, TenantConfigRequest,
-TenantShardSplitRequest, TenantShardSplitResponse,
+ShardParameters, TenantConfig, TenantConfigRequest, TenantShardSplitRequest,
+TenantShardSplitResponse,
 },
 shard::{ShardStripeSize, TenantShardId},
 };
@@ -117,19 +116,9 @@ enum Command {
 #[arg(long)]
 tenant_shard_id: TenantShardId,
 },
-/// Set the pageserver tenant configuration of a tenant: this is the configuration structure
+/// Modify the pageserver tenant configuration of a tenant: this is the configuration structure
 /// that is passed through to pageservers, and does not affect storage controller behavior.
-/// Any previous tenant configs are overwritten.
-SetTenantConfig {
-#[arg(long)]
-tenant_id: TenantId,
-#[arg(long)]
-config: String,
-},
-/// Patch the pageserver tenant configuration of a tenant. Any fields with null values in the
-/// provided JSON are unset from the tenant config and all fields with non-null values are set.
-/// Unspecified fields are not changed.
-PatchTenantConfig {
+TenantConfig {
 #[arg(long)]
 tenant_id: TenantId,
 #[arg(long)]
@@ -212,8 +201,6 @@ enum Command {
 #[arg(long)]
 timeout: humantime::Duration,
 },
-/// List safekeepers known to the storage controller
-Safekeepers {},
 }
 
 #[derive(Parser)]
@@ -562,21 +549,11 @@ async fn main() -> anyhow::Result<()> {
 )
 .await?;
 }
-Command::SetTenantConfig { tenant_id, config } => {
+Command::TenantConfig { tenant_id, config } => {
 let tenant_conf = serde_json::from_str(&config)?;
 
 vps_client
-.set_tenant_config(&TenantConfigRequest {
-tenant_id,
-config: tenant_conf,
-})
-.await?;
-}
-Command::PatchTenantConfig { tenant_id, config } => {
-let tenant_conf = serde_json::from_str(&config)?;
-
-vps_client
-.patch_tenant_config(&TenantConfigPatchRequest {
+.tenant_config(&TenantConfigRequest {
 tenant_id,
 config: tenant_conf,
 })
@@ -759,7 +736,7 @@ async fn main() -> anyhow::Result<()> {
 threshold,
 } => {
 vps_client
-.set_tenant_config(&TenantConfigRequest {
+.tenant_config(&TenantConfigRequest {
 tenant_id,
 config: TenantConfig {
 eviction_policy: Some(EvictionPolicy::LayerAccessThreshold(
@@ -1023,31 +1000,6 @@ async fn main() -> anyhow::Result<()> {
 "Fill was cancelled for node {node_id}. Schedulling policy is now {final_policy:?}"
 );
 }
-Command::Safekeepers {} => {
-let mut resp = storcon_client
-.dispatch::<(), Vec<SafekeeperDescribeResponse>>(
-Method::GET,
-"control/v1/safekeeper".to_string(),
-None,
-)
-.await?;
-
-resp.sort_by(|a, b| a.id.cmp(&b.id));
-
-let mut table = comfy_table::Table::new();
-table.set_header(["Id", "Version", "Host", "Port", "Http Port", "AZ Id"]);
-for sk in resp {
-table.add_row([
-format!("{}", sk.id),
-format!("{}", sk.version),
-sk.host,
-format!("{}", sk.port),
-format!("{}", sk.http_port),
-sk.availability_zone_id.to_string(),
-]);
-}
-println!("{table}");
-}
 }
 
 Ok(())
@@ -42,7 +42,6 @@ allow = [
 "MPL-2.0",
 "OpenSSL",
 "Unicode-DFS-2016",
-"Unicode-3.0",
 ]
 confidence-threshold = 0.8
 exceptions = [
@@ -132,6 +132,11 @@
 "name": "cron.database",
 "value": "postgres",
 "vartype": "string"
+},
+{
+"name": "session_preload_libraries",
+"value": "anon",
+"vartype": "string"
 }
 ]
 },
@@ -35,11 +35,11 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
 echo "clean up containers if exists"
 cleanup
 PG_TEST_VERSION=$((pg_version < 16 ? 16 : pg_version))
-# The support of pg_anon not yet added to PG17, so we have to add the corresponding option for other PG versions
-if [ "${pg_version}" -ne 17 ]; then
+# The support of pg_anon not yet added to PG17, so we have to remove the corresponding option
+if [ $pg_version -eq 17 ]; then
 SPEC_PATH="compute_wrapper/var/db/postgres/specs"
 mv $SPEC_PATH/spec.json $SPEC_PATH/spec.bak
-jq '.cluster.settings += [{"name": "session_preload_libraries","value": "anon","vartype": "string"}]' "${SPEC_PATH}/spec.bak" > "${SPEC_PATH}/spec.json"
+jq 'del(.cluster.settings[] | select (.name == "session_preload_libraries"))' $SPEC_PATH/spec.bak > $SPEC_PATH/spec.json
 fi
 PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d
 
@@ -106,8 +106,8 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
 fi
 fi
 cleanup
-# Restore the original spec.json
-if [ "$pg_version" -ne 17 ]; then
-mv "$SPEC_PATH/spec.bak" "$SPEC_PATH/spec.json"
+# The support of pg_anon not yet added to PG17, so we have to remove the corresponding option
+if [ $pg_version -eq 17 ]; then
+mv $SPEC_PATH/spec.bak $SPEC_PATH/spec.json
 fi
 done
@@ -1,5 +1,6 @@
 //! Structs representing the JSON formats used in the compute_ctl's HTTP API.
 
+use std::collections::HashSet;
 use std::fmt::Display;
 
 use chrono::{DateTime, Utc};
@@ -162,9 +163,8 @@ pub enum ControlPlaneComputeStatus {
 #[derive(Clone, Debug, Default, Serialize)]
 pub struct InstalledExtension {
 pub extname: String,
-pub version: String,
+pub versions: HashSet<String>,
 pub n_databases: u32, // Number of databases using this extension
-pub owned_by_superuser: String,
 }
 
 #[derive(Clone, Debug, Default, Serialize)]
@@ -67,15 +67,6 @@ pub struct ComputeSpec {
 #[serde(default)]
 pub disk_quota_bytes: Option<u64>,
 
-/// Disables the vm-monitor behavior that resizes LFC on upscale/downscale, instead relying on
-/// the initial size of LFC.
-///
-/// This is intended for use when the LFC size is being overridden from the default but
-/// autoscaling is still enabled, and we don't want the vm-monitor to interfere with the custom
-/// LFC sizing.
-#[serde(default)]
-pub disable_lfc_resizing: Option<bool>,
-
 /// Expected cluster state at the end of transition process.
 pub cluster: Cluster,
 pub delta_operations: Option<Vec<DeltaOp>>,
@@ -91,7 +91,7 @@ impl Timing {
 
 /// Return true if there is a ready event.
 fn is_event_ready(&self, queue: &mut BinaryHeap<Pending>) -> bool {
-queue.peek().is_some_and(|x| x.time <= self.now())
+queue.peek().map_or(false, |x| x.time <= self.now())
 }
 
 /// Clear all pending events.
@@ -75,7 +75,7 @@ pub struct TenantPolicyRequest {
 pub scheduling: Option<ShardSchedulingPolicy>,
 }
 
-#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
+#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
 pub struct AvailabilityZone(pub String);
 
 impl Display for AvailabilityZone {
@@ -245,17 +245,6 @@ impl From<NodeAvailability> for NodeAvailabilityWrapper {
 }
 }
 
-/// Scheduling policy enables us to selectively disable some automatic actions that the
-/// controller performs on a tenant shard. This is only set to a non-default value by
-/// human intervention, and it is reset to the default value (Active) when the tenant's
-/// placement policy is modified away from Attached.
-///
-/// The typical use of a non-Active scheduling policy is one of:
-/// - Pinnning a shard to a node (i.e. migrating it there & setting a non-Active scheduling policy)
-/// - Working around a bug (e.g. if something is flapping and we need to stop it until the bug is fixed)
-///
-/// If you're not sure which policy to use to pin a shard to its current location, you probably
-/// want Pause.
 #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
 pub enum ShardSchedulingPolicy {
 // Normal mode: the tenant's scheduled locations may be updated at will, including
@@ -372,23 +361,6 @@ pub struct MetadataHealthListOutdatedResponse {
 pub health_records: Vec<MetadataHealthRecord>,
 }
 
-/// Publicly exposed safekeeper description
-///
-/// The `active` flag which we have in the DB is not included on purpose: it is deprecated.
-#[derive(Serialize, Deserialize, Clone)]
-pub struct SafekeeperDescribeResponse {
-pub id: NodeId,
-pub region_id: String,
-/// 1 is special, it means just created (not currently posted to storcon).
-/// Zero or negative is not really expected.
-/// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
-pub version: i64,
-pub host: String,
-pub port: i32,
-pub http_port: i32,
-pub availability_zone_id: String,
-}
-
 #[cfg(test)]
 mod test {
 use super::*;
@@ -24,7 +24,7 @@ pub struct Key {
 
 /// When working with large numbers of Keys in-memory, it is more efficient to handle them as i128 than as
 /// a struct of fields.
-#[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, Debug)]
+#[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)]
 pub struct CompactKey(i128);
 
 /// The storage key size.
@@ -565,10 +565,6 @@ impl Key {
 && self.field5 == 0
 && self.field6 == u32::MAX
 }
-
-pub fn is_slru_dir_key(&self) -> bool {
-slru_dir_kind(self).is_some()
-}
 }
 
 #[inline(always)]
@@ -6,7 +6,6 @@ pub mod utilization;
 use camino::Utf8PathBuf;
 pub use utilization::PageserverUtilization;
 
-use core::ops::Range;
 use std::{
 collections::HashMap,
 fmt::Display,
@@ -18,7 +17,7 @@ use std::{
 
 use byteorder::{BigEndian, ReadBytesExt};
 use postgres_ffi::BLCKSZ;
-use serde::{Deserialize, Deserializer, Serialize, Serializer};
+use serde::{Deserialize, Serialize};
 use serde_with::serde_as;
 use utils::{
 completion,
@@ -29,7 +28,6 @@ use utils::{
 };
 
 use crate::{
-key::Key,
 reltag::RelTag,
 shard::{ShardCount, ShardStripeSize, TenantShardId},
 };
@@ -212,68 +210,6 @@ pub enum TimelineState {
 Broken { reason: String, backtrace: String },
 }
 
-#[serde_with::serde_as]
-#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
-pub struct CompactLsnRange {
-pub start: Lsn,
-pub end: Lsn,
-}
-
-#[serde_with::serde_as]
-#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
-pub struct CompactKeyRange {
-#[serde_as(as = "serde_with::DisplayFromStr")]
-pub start: Key,
-#[serde_as(as = "serde_with::DisplayFromStr")]
-pub end: Key,
-}
-
-impl From<Range<Lsn>> for CompactLsnRange {
-fn from(range: Range<Lsn>) -> Self {
-Self {
-start: range.start,
-end: range.end,
-}
-}
-}
-
-impl From<Range<Key>> for CompactKeyRange {
-fn from(range: Range<Key>) -> Self {
-Self {
-start: range.start,
-end: range.end,
-}
-}
-}
-
-impl From<CompactLsnRange> for Range<Lsn> {
-fn from(range: CompactLsnRange) -> Self {
-range.start..range.end
-}
-}
-
-impl From<CompactKeyRange> for Range<Key> {
-fn from(range: CompactKeyRange) -> Self {
-range.start..range.end
-}
-}
-
-impl CompactLsnRange {
-pub fn above(lsn: Lsn) -> Self {
-Self {
-start: lsn,
-end: Lsn::MAX,
-}
-}
-}
-
-#[derive(Debug, Clone, Serialize)]
-pub struct CompactInfoResponse {
-pub compact_key_range: Option<CompactKeyRange>,
-pub compact_lsn_range: Option<CompactLsnRange>,
-pub sub_compaction: bool,
-}
-
 #[derive(Serialize, Deserialize, Clone)]
 pub struct TimelineCreateRequest {
 pub new_timeline_id: TimelineId,
@@ -389,115 +325,6 @@ impl Default for ShardParameters {
 }
 }
 
-#[derive(Debug, Default, Clone, Eq, PartialEq)]
-pub enum FieldPatch<T> {
-Upsert(T),
-Remove,
-#[default]
-Noop,
-}
-
-impl<T> FieldPatch<T> {
-fn is_noop(&self) -> bool {
-matches!(self, FieldPatch::Noop)
-}
-
-pub fn apply(self, target: &mut Option<T>) {
-match self {
-Self::Upsert(v) => *target = Some(v),
-Self::Remove => *target = None,
-Self::Noop => {}
-}
-}
-
-pub fn map<U, E, F: FnOnce(T) -> Result<U, E>>(self, map: F) -> Result<FieldPatch<U>, E> {
-match self {
-Self::Upsert(v) => Ok(FieldPatch::<U>::Upsert(map(v)?)),
-Self::Remove => Ok(FieldPatch::<U>::Remove),
-Self::Noop => Ok(FieldPatch::<U>::Noop),
-}
-}
-}
-
-impl<'de, T: Deserialize<'de>> Deserialize<'de> for FieldPatch<T> {
-fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-where
-D: Deserializer<'de>,
-{
-Option::deserialize(deserializer).map(|opt| match opt {
-None => FieldPatch::Remove,
-Some(val) => FieldPatch::Upsert(val),
-})
-}
-}
-
-impl<T: Serialize> Serialize for FieldPatch<T> {
-fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-where
-S: Serializer,
-{
-match self {
-FieldPatch::Upsert(val) => serializer.serialize_some(val),
-FieldPatch::Remove => serializer.serialize_none(),
-FieldPatch::Noop => unreachable!(),
-}
-}
-}
-
-#[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
-#[serde(default)]
-pub struct TenantConfigPatch {
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub checkpoint_distance: FieldPatch<u64>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub checkpoint_timeout: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub compaction_target_size: FieldPatch<u64>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub compaction_period: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub compaction_threshold: FieldPatch<usize>,
-// defer parsing compaction_algorithm, like eviction_policy
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub compaction_algorithm: FieldPatch<CompactionAlgorithmSettings>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub gc_horizon: FieldPatch<u64>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub gc_period: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub image_creation_threshold: FieldPatch<usize>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub pitr_interval: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub walreceiver_connect_timeout: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub lagging_wal_timeout: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub max_lsn_wal_lag: FieldPatch<NonZeroU64>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub eviction_policy: FieldPatch<EvictionPolicy>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub min_resident_size_override: FieldPatch<u64>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub evictions_low_residence_duration_metric_threshold: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub heatmap_period: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub lazy_slru_download: FieldPatch<bool>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub timeline_get_throttle: FieldPatch<ThrottleConfig>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub image_layer_creation_check_threshold: FieldPatch<u8>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub lsn_lease_length: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub lsn_lease_length_for_ts: FieldPatch<String>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub timeline_offloading: FieldPatch<bool>,
-#[serde(skip_serializing_if = "FieldPatch::is_noop")]
-pub wal_receiver_protocol_override: FieldPatch<PostgresClientProtocol>,
-}
-
 /// An alternative representation of `pageserver::tenant::TenantConf` with
 /// simpler types.
 #[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
@@ -529,107 +356,6 @@ pub struct TenantConfig {
 pub wal_receiver_protocol_override: Option<PostgresClientProtocol>,
 }
 
-impl TenantConfig {
-pub fn apply_patch(self, patch: TenantConfigPatch) -> TenantConfig {
-let Self {
-mut checkpoint_distance,
-mut checkpoint_timeout,
-mut compaction_target_size,
-mut compaction_period,
-mut compaction_threshold,
-mut compaction_algorithm,
-mut gc_horizon,
-mut gc_period,
-mut image_creation_threshold,
-mut pitr_interval,
-mut walreceiver_connect_timeout,
-mut lagging_wal_timeout,
-mut max_lsn_wal_lag,
-mut eviction_policy,
-mut min_resident_size_override,
-mut evictions_low_residence_duration_metric_threshold,
-mut heatmap_period,
-mut lazy_slru_download,
-mut timeline_get_throttle,
-mut image_layer_creation_check_threshold,
-mut lsn_lease_length,
-mut lsn_lease_length_for_ts,
-mut timeline_offloading,
-mut wal_receiver_protocol_override,
-} = self;
-
-patch.checkpoint_distance.apply(&mut checkpoint_distance);
-patch.checkpoint_timeout.apply(&mut checkpoint_timeout);
-patch
-.compaction_target_size
-.apply(&mut compaction_target_size);
-patch.compaction_period.apply(&mut compaction_period);
-patch.compaction_threshold.apply(&mut compaction_threshold);
-patch.compaction_algorithm.apply(&mut compaction_algorithm);
-patch.gc_horizon.apply(&mut gc_horizon);
-patch.gc_period.apply(&mut gc_period);
-patch
-.image_creation_threshold
-.apply(&mut image_creation_threshold);
-patch.pitr_interval.apply(&mut pitr_interval);
-patch
-.walreceiver_connect_timeout
-.apply(&mut walreceiver_connect_timeout);
-patch.lagging_wal_timeout.apply(&mut lagging_wal_timeout);
-patch.max_lsn_wal_lag.apply(&mut max_lsn_wal_lag);
-patch.eviction_policy.apply(&mut eviction_policy);
-patch
-.min_resident_size_override
-.apply(&mut min_resident_size_override);
-patch
-.evictions_low_residence_duration_metric_threshold
-.apply(&mut evictions_low_residence_duration_metric_threshold);
-patch.heatmap_period.apply(&mut heatmap_period);
-patch.lazy_slru_download.apply(&mut lazy_slru_download);
-patch
-.timeline_get_throttle
-.apply(&mut timeline_get_throttle);
-patch
-.image_layer_creation_check_threshold
-.apply(&mut image_layer_creation_check_threshold);
-patch.lsn_lease_length.apply(&mut lsn_lease_length);
-patch
-.lsn_lease_length_for_ts
-.apply(&mut lsn_lease_length_for_ts);
-patch.timeline_offloading.apply(&mut timeline_offloading);
-patch
-.wal_receiver_protocol_override
-.apply(&mut wal_receiver_protocol_override);
-
-Self {
-checkpoint_distance,
-checkpoint_timeout,
-compaction_target_size,
-compaction_period,
-compaction_threshold,
-compaction_algorithm,
-gc_horizon,
-gc_period,
-image_creation_threshold,
-pitr_interval,
-walreceiver_connect_timeout,
-lagging_wal_timeout,
-max_lsn_wal_lag,
-eviction_policy,
-min_resident_size_override,
-evictions_low_residence_duration_metric_threshold,
-heatmap_period,
-lazy_slru_download,
-timeline_get_throttle,
-image_layer_creation_check_threshold,
-lsn_lease_length,
-lsn_lease_length_for_ts,
-timeline_offloading,
-wal_receiver_protocol_override,
-}
-}
-}
-
 /// The policy for the aux file storage.
 ///
 /// It can be switched through `switch_aux_file_policy` tenant config.
@@ -960,14 +686,6 @@ impl TenantConfigRequest {
 }
 }
 
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(deny_unknown_fields)]
-pub struct TenantConfigPatchRequest {
-pub tenant_id: TenantId,
-#[serde(flatten)]
-pub config: TenantConfigPatch, // as we have a flattened field, we should reject all unknown fields in it
-}
-
 /// See [`TenantState::attachment_status`] and the OpenAPI docs for context.
 #[derive(Serialize, Deserialize, Clone)]
 #[serde(tag = "slug", content = "data", rename_all = "snake_case")]
@@ -1981,45 +1699,4 @@ mod tests {
 );
 }
 }
-
-#[test]
-fn test_tenant_config_patch_request_serde() {
-let patch_request = TenantConfigPatchRequest {
-tenant_id: TenantId::from_str("17c6d121946a61e5ab0fe5a2fd4d8215").unwrap(),
-config: TenantConfigPatch {
-checkpoint_distance: FieldPatch::Upsert(42),
-gc_horizon: FieldPatch::Remove,
-compaction_threshold: FieldPatch::Noop,
-..TenantConfigPatch::default()
-},
-};
-
-let json = serde_json::to_string(&patch_request).unwrap();
-
-let expected = r#"{"tenant_id":"17c6d121946a61e5ab0fe5a2fd4d8215","checkpoint_distance":42,"gc_horizon":null}"#;
-assert_eq!(json, expected);
-
-let decoded: TenantConfigPatchRequest = serde_json::from_str(&json).unwrap();
-assert_eq!(decoded.tenant_id, patch_request.tenant_id);
-assert_eq!(decoded.config, patch_request.config);
-
-// Now apply the patch to a config to demonstrate semantics
-
-let base = TenantConfig {
-checkpoint_distance: Some(28),
-gc_horizon: Some(100),
-compaction_target_size: Some(1024),
-..Default::default()
-};
-
-let expected = TenantConfig {
-checkpoint_distance: Some(42),
-gc_horizon: None,
-..base.clone()
-};
-
-let patched = base.apply_patch(decoded.config);
-
-assert_eq!(patched, expected);
-}
 }
@@ -173,11 +173,7 @@ impl ShardIdentity {
 
 /// Return true if the key should be stored on all shards, not just one.
 pub fn is_key_global(&self, key: &Key) -> bool {
-if key.is_slru_block_key()
-|| key.is_slru_segment_size_key()
-|| key.is_aux_file_key()
-|| key.is_slru_dir_key()
-{
+if key.is_slru_block_key() || key.is_slru_segment_size_key() || key.is_aux_file_key() {
 // Special keys that are only stored on shard 0
 false
 } else if key.is_rel_block_key() {
@@ -9,11 +9,9 @@ regex.workspace = true
 bytes.workspace = true
 anyhow.workspace = true
 crc32c.workspace = true
-criterion.workspace = true
 once_cell.workspace = true
 log.workspace = true
 memoffset.workspace = true
-pprof.workspace = true
 thiserror.workspace = true
 serde.workspace = true
 utils.workspace = true
@@ -26,7 +24,3 @@ postgres.workspace = true
 [build-dependencies]
 anyhow.workspace = true
 bindgen.workspace = true
-
-[[bench]]
-name = "waldecoder"
-harness = false
@@ -1,26 +0,0 @@
-## Benchmarks
-
-To run benchmarks:
-
-```sh
-# All benchmarks.
-cargo bench --package postgres_ffi
-
-# Specific file.
-cargo bench --package postgres_ffi --bench waldecoder
-
-# Specific benchmark.
-cargo bench --package postgres_ffi --bench waldecoder complete_record/size=1024
-
-# List available benchmarks.
-cargo bench --package postgres_ffi --benches -- --list
-
-# Generate flamegraph profiles using pprof-rs, profiling for 10 seconds.
-# Output in target/criterion/*/profile/flamegraph.svg.
-cargo bench --package postgres_ffi --bench waldecoder complete_record/size=1024 -- --profile-time 10
-```
-
-Additional charts and statistics are available in `target/criterion/report/index.html`.
-
-Benchmarks are automatically compared against the previous run. To compare against other runs, see
-`--baseline` and `--save-baseline`.
@@ -1,49 +0,0 @@
-use std::ffi::CStr;
-
-use criterion::{criterion_group, criterion_main, Bencher, Criterion};
-use postgres_ffi::v17::wal_generator::LogicalMessageGenerator;
-use postgres_ffi::v17::waldecoder_handler::WalStreamDecoderHandler;
-use postgres_ffi::waldecoder::WalStreamDecoder;
-use pprof::criterion::{Output, PProfProfiler};
-use utils::lsn::Lsn;
-
-const KB: usize = 1024;
-
-// Register benchmarks with Criterion.
-criterion_group!(
-name = benches;
-config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
-targets = bench_complete_record,
-);
-criterion_main!(benches);
-
-/// Benchmarks WalStreamDecoder::complete_record() for a logical message of varying size.
-fn bench_complete_record(c: &mut Criterion) {
-let mut g = c.benchmark_group("complete_record");
-for size in [64, KB, 8 * KB, 128 * KB] {
-// Kind of weird to change the group throughput per benchmark, but it's the only way
-// to vary it per benchmark. It works.
-g.throughput(criterion::Throughput::Bytes(size as u64));
-g.bench_function(format!("size={size}"), |b| run_bench(b, size).unwrap());
-}
-
-fn run_bench(b: &mut Bencher, size: usize) -> anyhow::Result<()> {
-const PREFIX: &CStr = c"";
-let value_size = LogicalMessageGenerator::make_value_size(size, PREFIX);
-let value = vec![1; value_size];
-
-let mut decoder = WalStreamDecoder::new(Lsn(0), 170000);
-let msg = LogicalMessageGenerator::new(PREFIX, &value)
-.next()
-.unwrap()
-.encode(Lsn(0));
-assert_eq!(msg.len(), size);
-
-b.iter(|| {
-let msg = msg.clone(); // Bytes::clone() is cheap
-decoder.complete_record(msg).unwrap();
-});
-
-Ok(())
-}
-}
@@ -106,11 +106,11 @@ impl<R: RecordGenerator> WalGenerator<R> {
 const TIMELINE_ID: u32 = 1;
 
 /// Creates a new WAL generator with the given record generator.
-pub fn new(record_generator: R, start_lsn: Lsn) -> WalGenerator<R> {
+pub fn new(record_generator: R) -> WalGenerator<R> {
 Self {
 record_generator,
-lsn: start_lsn,
-prev_lsn: start_lsn,
+lsn: Lsn(0),
+prev_lsn: Lsn(0),
 }
 }
 
@@ -231,22 +231,6 @@ impl LogicalMessageGenerator {
 };
 [&header.encode(), prefix, message].concat().into()
 }
-
-/// Computes how large a value must be to get a record of the given size. Convenience method to
-/// construct records of pre-determined size. Panics if the record size is too small.
-pub fn make_value_size(record_size: usize, prefix: &CStr) -> usize {
-let xlog_header_size = XLOG_SIZE_OF_XLOG_RECORD;
-let lm_header_size = size_of::<XlLogicalMessage>();
-let prefix_size = prefix.to_bytes_with_nul().len();
-let data_header_size = match record_size - xlog_header_size - 2 {
-0..=255 => 2,
-256..=258 => panic!("impossible record_size {record_size}"),
-259.. => 5,
-};
-record_size
-.checked_sub(xlog_header_size + lm_header_size + prefix_size + data_header_size)
-.expect("record_size too small")
-}
 }
 
 impl Iterator for LogicalMessageGenerator {
@@ -81,7 +81,7 @@ fn test_end_of_wal<C: crate::Crafter>(test_name: &str) {
 continue;
 }
 let mut f = File::options().write(true).open(file.path()).unwrap();
-static ZEROS: [u8; WAL_SEGMENT_SIZE] = [0u8; WAL_SEGMENT_SIZE];
+const ZEROS: [u8; WAL_SEGMENT_SIZE] = [0u8; WAL_SEGMENT_SIZE];
 f.write_all(
 &ZEROS[0..min(
 WAL_SEGMENT_SIZE,
@@ -1,7 +1,7 @@
 [package]
 name = "postgres-protocol2"
 version = "0.1.0"
-edition = "2021"
+edition = "2018"
 license = "MIT/Apache-2.0"
 
 [dependencies]
@@ -9,7 +9,8 @@
 //!
 //! This library assumes that the `client_encoding` backend parameter has been
 //! set to `UTF8`. It will most likely not behave properly if that is not the case.
-#![warn(missing_docs, clippy::all)]
+#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.6")]
+#![warn(missing_docs, rust_2018_idioms, clippy::all)]
 
 use byteorder::{BigEndian, ByteOrder};
 use bytes::{BufMut, BytesMut};
@@ -3,6 +3,7 @@
 
 use byteorder::{BigEndian, ByteOrder};
 use bytes::{Buf, BufMut, BytesMut};
+use std::convert::TryFrom;
 use std::error::Error;
 use std::io;
 use std::marker;
@@ -1,7 +1,7 @@
 [package]
 name = "postgres-types2"
 version = "0.1.0"
-edition = "2021"
+edition = "2018"
 license = "MIT/Apache-2.0"
 
 [dependencies]
@@ -2,7 +2,8 @@
 //!
 //! This crate is used by the `tokio-postgres` and `postgres` crates. You normally don't need to depend directly on it
 //! unless you want to define your own `ToSql` or `FromSql` definitions.
-#![warn(clippy::all, missing_docs)]
+#![doc(html_root_url = "https://docs.rs/postgres-types/0.2")]
+#![warn(clippy::all, rust_2018_idioms, missing_docs)]
 
 use fallible_iterator::FallibleIterator;
 use postgres_protocol2::types;
@@ -1,7 +1,7 @@
 [package]
 name = "tokio-postgres2"
 version = "0.1.0"
-edition = "2021"
+edition = "2018"
 license = "MIT/Apache-2.0"
 
 [dependencies]
@@ -4,23 +4,18 @@ use crate::config::Host;
 use crate::config::SslMode;
 use crate::connection::{Request, RequestMessages};
 
-use crate::query::RowStream;
-use crate::simple_query::SimpleQueryStream;
+use crate::types::{Oid, Type};
 
-use crate::types::{Oid, ToSql, Type};
-
 use crate::{
-prepare, query, simple_query, slice_iter, CancelToken, Error, ReadyForQueryStatus, Row,
-SimpleQueryMessage, Statement, ToStatement, Transaction, TransactionBuilder,
+simple_query, CancelToken, Error, ReadyForQueryStatus, Statement, Transaction,
+TransactionBuilder,
 };
 use bytes::BytesMut;
 use fallible_iterator::FallibleIterator;
-use futures_util::{future, ready, TryStreamExt};
-use parking_lot::Mutex;
+use futures_util::{future, ready};
 use postgres_protocol2::message::{backend::Message, frontend};
 use std::collections::HashMap;
 use std::fmt;
-use std::sync::Arc;
 use std::task::{Context, Poll};
 use tokio::sync::mpsc;
 
@@ -55,7 +50,7 @@ impl Responses {
 /// A cache of type info and prepared statements for fetching type info
 /// (corresponding to the queries in the [prepare] module).
 #[derive(Default)]
-struct CachedTypeInfo {
+pub(crate) struct CachedTypeInfo {
 /// A statement for basic information for a type from its
 /// OID. Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_QUERY) (or its
 /// fallback).
@@ -71,13 +66,45 @@ struct CachedTypeInfo {
 /// Cache of types already looked up.
 types: HashMap<Oid, Type>,
 }
+impl CachedTypeInfo {
+pub(crate) fn typeinfo(&mut self) -> Option<&Statement> {
+self.typeinfo.as_ref()
+}
+
+pub(crate) fn set_typeinfo(&mut self, statement: Statement) -> &Statement {
+self.typeinfo.insert(statement)
+}
+
+pub(crate) fn typeinfo_composite(&mut self) -> Option<&Statement> {
+self.typeinfo_composite.as_ref()
+}
+
+pub(crate) fn set_typeinfo_composite(&mut self, statement: Statement) -> &Statement {
+self.typeinfo_composite.insert(statement)
+}
+
+pub(crate) fn typeinfo_enum(&mut self) -> Option<&Statement> {
+self.typeinfo_enum.as_ref()
+}
+
+pub(crate) fn set_typeinfo_enum(&mut self, statement: Statement) -> &Statement {
+self.typeinfo_enum.insert(statement)
+}
+
+pub(crate) fn type_(&mut self, oid: Oid) -> Option<Type> {
+self.types.get(&oid).cloned()
+}
+
+pub(crate) fn set_type(&mut self, oid: Oid, type_: &Type) {
+self.types.insert(oid, type_.clone());
+}
+}
 
 pub struct InnerClient {
 sender: mpsc::UnboundedSender<Request>,
-cached_typeinfo: Mutex<CachedTypeInfo>,
 
 /// A buffer to use when writing out postgres commands.
-buffer: Mutex<BytesMut>,
+buffer: BytesMut,
 }
 
 impl InnerClient {
@@ -92,47 +119,14 @@ impl InnerClient {
 })
 }
 
-pub fn typeinfo(&self) -> Option<Statement> {
-self.cached_typeinfo.lock().typeinfo.clone()
-}
-
-pub fn set_typeinfo(&self, statement: &Statement) {
-self.cached_typeinfo.lock().typeinfo = Some(statement.clone());
-}
-
-pub fn typeinfo_composite(&self) -> Option<Statement> {
-self.cached_typeinfo.lock().typeinfo_composite.clone()
-}
-
-pub fn set_typeinfo_composite(&self, statement: &Statement) {
-self.cached_typeinfo.lock().typeinfo_composite = Some(statement.clone());
-}
-
-pub fn typeinfo_enum(&self) -> Option<Statement> {
-self.cached_typeinfo.lock().typeinfo_enum.clone()
-}
-
-pub fn set_typeinfo_enum(&self, statement: &Statement) {
-self.cached_typeinfo.lock().typeinfo_enum = Some(statement.clone());
-}
-
-pub fn type_(&self, oid: Oid) -> Option<Type> {
-self.cached_typeinfo.lock().types.get(&oid).cloned()
-}
-
-pub fn set_type(&self, oid: Oid, type_: &Type) {
-self.cached_typeinfo.lock().types.insert(oid, type_.clone());
-}
-
 /// Call the given function with a buffer to be used when writing out
 /// postgres commands.
-pub fn with_buf<F, R>(&self, f: F) -> R
+pub fn with_buf<F, R>(&mut self, f: F) -> R
 where
 F: FnOnce(&mut BytesMut) -> R,
 {
-let mut buffer = self.buffer.lock();
-let r = f(&mut buffer);
-buffer.clear();
+let r = f(&mut self.buffer);
+self.buffer.clear();
 r
 }
 }
@@ -150,7 +144,8 @@ pub struct SocketConfig {
 /// The client is one half of what is returned when a connection is established. Users interact with the database
 /// through this client object.
 pub struct Client {
-inner: Arc<InnerClient>,
+pub(crate) inner: InnerClient,
+pub(crate) cached_typeinfo: CachedTypeInfo,
 
 socket_config: SocketConfig,
 ssl_mode: SslMode,
@@ -167,11 +162,11 @@ impl Client {
 secret_key: i32,
 ) -> Client {
 Client {
-inner: Arc::new(InnerClient {
+inner: InnerClient {
 sender,
-cached_typeinfo: Default::default(),
 buffer: Default::default(),
-}),
+},
+cached_typeinfo: Default::default(),
 
 socket_config,
 ssl_mode,
@@ -185,161 +180,6 @@ impl Client {
 self.process_id
 }
 
-pub(crate) fn inner(&self) -> &Arc<InnerClient> {
-&self.inner
-}
-
-/// Creates a new prepared statement.
-///
-/// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc),
-/// which are set when executed. Prepared statements can only be used with the connection that created them.
-pub async fn prepare(&self, query: &str) -> Result<Statement, Error> {
-self.prepare_typed(query, &[]).await
-}
-
-/// Like `prepare`, but allows the types of query parameters to be explicitly specified.
-///
-/// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be
-/// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`.
-pub async fn prepare_typed(
-&self,
-query: &str,
-parameter_types: &[Type],
-) -> Result<Statement, Error> {
-prepare::prepare(&self.inner, query, parameter_types).await
-}
-
-/// Executes a statement, returning a vector of the resulting rows.
-///
-/// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
-/// provided, 1-indexed.
-///
-/// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
-/// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
-/// with the `prepare` method.
-///
-/// # Panics
-///
-/// Panics if the number of parameters provided does not match the number expected.
-pub async fn query<T>(
-&self,
-statement: &T,
-params: &[&(dyn ToSql + Sync)],
-) -> Result<Vec<Row>, Error>
-where
-T: ?Sized + ToStatement,
-{
-self.query_raw(statement, slice_iter(params))
-.await?
-.try_collect()
-.await
-}
-
-/// The maximally flexible version of [`query`].
-///
-/// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
-/// provided, 1-indexed.
-///
-/// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
-/// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
-/// with the `prepare` method.
-///
-/// # Panics
-///
-/// Panics if the number of parameters provided does not match the number expected.
-///
-/// [`query`]: #method.query
-pub async fn query_raw<'a, T, I>(&self, statement: &T, params: I) -> Result<RowStream, Error>
-where
-T: ?Sized + ToStatement,
-I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
-I::IntoIter: ExactSizeIterator,
-{
-let statement = statement.__convert().into_statement(self).await?;
-query::query(&self.inner, statement, params).await
-}
-
-/// Pass text directly to the Postgres backend to allow it to sort out typing itself and
-/// to save a roundtrip
-pub async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
-where
-S: AsRef<str>,
-I: IntoIterator<Item = Option<S>>,
-I::IntoIter: ExactSizeIterator,
-{
-query::query_txt(&self.inner, statement, params).await
-}
-
-/// Executes a statement, returning the number of rows modified.
-///
-/// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
|
|
||||||
/// provided, 1-indexed.
|
|
||||||
///
|
|
||||||
/// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
|
|
||||||
/// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
|
|
||||||
/// with the `prepare` method.
|
|
||||||
///
|
|
||||||
/// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// Panics if the number of parameters provided does not match the number expected.
|
|
||||||
pub async fn execute<T>(
|
|
||||||
&self,
|
|
||||||
statement: &T,
|
|
||||||
params: &[&(dyn ToSql + Sync)],
|
|
||||||
) -> Result<u64, Error>
|
|
||||||
where
|
|
||||||
T: ?Sized + ToStatement,
|
|
||||||
{
|
|
||||||
self.execute_raw(statement, slice_iter(params)).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The maximally flexible version of [`execute`].
|
|
||||||
///
|
|
||||||
/// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
|
|
||||||
/// provided, 1-indexed.
|
|
||||||
///
|
|
||||||
/// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be
|
|
||||||
/// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front
|
|
||||||
/// with the `prepare` method.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// Panics if the number of parameters provided does not match the number expected.
|
|
||||||
///
|
|
||||||
/// [`execute`]: #method.execute
|
|
||||||
pub async fn execute_raw<'a, T, I>(&self, statement: &T, params: I) -> Result<u64, Error>
|
|
||||||
where
|
|
||||||
T: ?Sized + ToStatement,
|
|
||||||
I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
|
|
||||||
I::IntoIter: ExactSizeIterator,
|
|
||||||
{
|
|
||||||
let statement = statement.__convert().into_statement(self).await?;
|
|
||||||
query::execute(self.inner(), statement, params).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows.
|
|
||||||
///
|
|
||||||
/// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
|
|
||||||
/// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings,
|
|
||||||
/// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a list of the
|
|
||||||
/// rows, this method returns a list of an enum which indicates either the completion of one of the commands,
|
|
||||||
/// or a row of data. This preserves the framing between the separate statements in the request.
|
|
||||||
///
|
|
||||||
/// # Warning
|
|
||||||
///
|
|
||||||
/// Prepared statements should be used for any query which contains user-specified data, as they provide the

|
|
||||||
/// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
|
|
||||||
/// them to this method!
|
|
||||||
pub async fn simple_query(&self, query: &str) -> Result<Vec<SimpleQueryMessage>, Error> {
|
|
||||||
self.simple_query_raw(query).await?.try_collect().await
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) async fn simple_query_raw(&self, query: &str) -> Result<SimpleQueryStream, Error> {
|
|
||||||
simple_query::simple_query(self.inner(), query).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Executes a sequence of SQL statements using the simple query protocol.
|
/// Executes a sequence of SQL statements using the simple query protocol.
|
||||||
///
|
///
|
||||||
/// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
|
/// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
|
||||||
@@ -350,8 +190,8 @@ impl Client {
|
|||||||
/// Prepared statements should be used for any query which contains user-specified data, as they provide the
|
/// Prepared statements should be used for any query which contains user-specified data, as they provide the
|
||||||
/// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
|
/// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
|
||||||
/// them to this method!
|
/// them to this method!
|
||||||
pub async fn batch_execute(&self, query: &str) -> Result<ReadyForQueryStatus, Error> {
|
pub async fn batch_execute(&mut self, query: &str) -> Result<ReadyForQueryStatus, Error> {
|
||||||
simple_query::batch_execute(self.inner(), query).await
|
simple_query::batch_execute(&mut self.inner, query).await
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Begins a new database transaction.
|
/// Begins a new database transaction.
|
||||||
@@ -359,7 +199,7 @@ impl Client {
|
|||||||
/// The transaction will roll back by default - use the `commit` method to commit it.
|
/// The transaction will roll back by default - use the `commit` method to commit it.
|
||||||
pub async fn transaction(&mut self) -> Result<Transaction<'_>, Error> {
|
pub async fn transaction(&mut self) -> Result<Transaction<'_>, Error> {
|
||||||
struct RollbackIfNotDone<'me> {
|
struct RollbackIfNotDone<'me> {
|
||||||
client: &'me Client,
|
client: &'me mut Client,
|
||||||
done: bool,
|
done: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -369,13 +209,13 @@ impl Client {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
let buf = self.client.inner().with_buf(|buf| {
|
let buf = self.client.inner.with_buf(|buf| {
|
||||||
frontend::query("ROLLBACK", buf).unwrap();
|
frontend::query("ROLLBACK", buf).unwrap();
|
||||||
buf.split().freeze()
|
buf.split().freeze()
|
||||||
});
|
});
|
||||||
let _ = self
|
let _ = self
|
||||||
.client
|
.client
|
||||||
.inner()
|
.inner
|
||||||
.send(RequestMessages::Single(FrontendMessage::Raw(buf)));
|
.send(RequestMessages::Single(FrontendMessage::Raw(buf)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -390,7 +230,7 @@ impl Client {
|
|||||||
client: self,
|
client: self,
|
||||||
done: false,
|
done: false,
|
||||||
};
|
};
|
||||||
self.batch_execute("BEGIN").await?;
|
cleaner.client.batch_execute("BEGIN").await?;
|
||||||
cleaner.done = true;
|
cleaner.done = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
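Editor's note: the `RollbackIfNotDone` guard above is a drop guard: once `BEGIN` has gone out, dropping the guard before it is marked done queues a `ROLLBACK`. A hedged sketch of the idea with stand-in types (the real guard writes the raw frontend message into the connection's request channel rather than calling a method):

```rust
// Drop-guard sketch; Vec<String> stands in for the client's outgoing queue.
struct RollbackIfNotDone<'a> {
    client: &'a mut Vec<String>,
    done: bool,
}

impl Drop for RollbackIfNotDone<'_> {
    fn drop(&mut self) {
        if !self.done {
            // best-effort: in the real code a failed send here is ignored
            self.client.push("ROLLBACK".to_string());
        }
    }
}

fn begin(queue: &mut Vec<String>) {
    let mut guard = RollbackIfNotDone { client: queue, done: false };
    guard.client.push("BEGIN".to_string());
    // ... anything that can fail would happen here ...
    guard.done = true; // reached only on success, so no ROLLBACK is queued
}
```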
@@ -416,11 +256,6 @@ impl Client {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Query for type information
|
|
||||||
pub async fn get_type(&self, oid: Oid) -> Result<Type, Error> {
|
|
||||||
crate::prepare::get_type(&self.inner, oid).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Determines if the connection to the server has already closed.
|
/// Determines if the connection to the server has already closed.
|
||||||
///
|
///
|
||||||
/// In that case, all future queries will fail.
|
/// In that case, all future queries will fail.
|
||||||
|
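Editor's note: with `batch_execute` (and the other entry points in this file) now taking `&mut self`, the client can no longer be shared behind an `Arc` for concurrent calls; callers need exclusive access. A hedged usage sketch, assuming only the `Client`, `Error`, and `batch_execute` shown above (the SQL is illustrative and the returned `ReadyForQueryStatus` is discarded):

```rust
async fn set_session_defaults(client: &mut Client) -> Result<(), Error> {
    // simple-protocol batch; statements are separated by semicolons
    client
        .batch_execute("SET statement_timeout = '30s'; SET idle_in_transaction_session_timeout = '60s'")
        .await?;
    Ok(())
}
```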
|||||||
@@ -33,12 +33,8 @@ pub struct Response {
|
|||||||
#[derive(PartialEq, Debug)]
|
#[derive(PartialEq, Debug)]
|
||||||
enum State {
|
enum State {
|
||||||
Active,
|
Active,
|
||||||
Closing,
|
|
||||||
}
|
|
||||||
|
|
||||||
enum WriteReady {
|
|
||||||
Terminating,
|
Terminating,
|
||||||
WaitingOnRead,
|
Closing,
|
||||||
}
|
}
|
||||||
|
|
||||||
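Editor's note: the two old enums are folded into one `State`, with an explicit `Terminating` step between `Active` and `Closing`. A hedged reading of the lifecycle, as stand-in code:

```rust
// Stand-in, not the crate's definition; the comments describe the transitions
// used by poll_write/poll_shutdown further down in this diff.
enum State {
    Active,      // normal request/response pumping
    Terminating, // client channel closed, Terminate message queued but not yet written
    Closing,     // Terminate written to the socket; only poll_shutdown remains
}
```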
/// A connection to a PostgreSQL database.
|
/// A connection to a PostgreSQL database.
|
||||||
@@ -55,6 +51,7 @@ pub struct Connection<S, T> {
|
|||||||
/// HACK: we need this in the Neon Proxy to forward params.
|
/// HACK: we need this in the Neon Proxy to forward params.
|
||||||
pub parameters: HashMap<String, String>,
|
pub parameters: HashMap<String, String>,
|
||||||
receiver: mpsc::UnboundedReceiver<Request>,
|
receiver: mpsc::UnboundedReceiver<Request>,
|
||||||
|
pending_request: Option<RequestMessages>,
|
||||||
pending_responses: VecDeque<BackendMessage>,
|
pending_responses: VecDeque<BackendMessage>,
|
||||||
responses: VecDeque<Response>,
|
responses: VecDeque<Response>,
|
||||||
state: State,
|
state: State,
|
||||||
@@ -75,6 +72,7 @@ where
|
|||||||
stream,
|
stream,
|
||||||
parameters,
|
parameters,
|
||||||
receiver,
|
receiver,
|
||||||
|
pending_request: None,
|
||||||
pending_responses,
|
pending_responses,
|
||||||
responses: VecDeque::new(),
|
responses: VecDeque::new(),
|
||||||
state: State::Active,
|
state: State::Active,
|
||||||
@@ -95,23 +93,26 @@ where
|
|||||||
.map(|o| o.map(|r| r.map_err(Error::io)))
|
.map(|o| o.map(|r| r.map_err(Error::io)))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Read and process messages from the connection to postgres.
|
fn poll_read(&mut self, cx: &mut Context<'_>) -> Result<Option<AsyncMessage>, Error> {
|
||||||
/// client <- postgres
|
if self.state != State::Active {
|
||||||
fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll<Result<AsyncMessage, Error>> {
|
trace!("poll_read: done");
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let message = match self.poll_response(cx)? {
|
let message = match self.poll_response(cx)? {
|
||||||
Poll::Ready(Some(message)) => message,
|
Poll::Ready(Some(message)) => message,
|
||||||
Poll::Ready(None) => return Poll::Ready(Err(Error::closed())),
|
Poll::Ready(None) => return Err(Error::closed()),
|
||||||
Poll::Pending => {
|
Poll::Pending => {
|
||||||
trace!("poll_read: waiting on response");
|
trace!("poll_read: waiting on response");
|
||||||
return Poll::Pending;
|
return Ok(None);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let (mut messages, request_complete) = match message {
|
let (mut messages, request_complete) = match message {
|
||||||
BackendMessage::Async(Message::NoticeResponse(body)) => {
|
BackendMessage::Async(Message::NoticeResponse(body)) => {
|
||||||
let error = DbError::parse(&mut body.fields()).map_err(Error::parse)?;
|
let error = DbError::parse(&mut body.fields()).map_err(Error::parse)?;
|
||||||
return Poll::Ready(Ok(AsyncMessage::Notice(error)));
|
return Ok(Some(AsyncMessage::Notice(error)));
|
||||||
}
|
}
|
||||||
BackendMessage::Async(Message::NotificationResponse(body)) => {
|
BackendMessage::Async(Message::NotificationResponse(body)) => {
|
||||||
let notification = Notification {
|
let notification = Notification {
|
||||||
@@ -119,7 +120,7 @@ where
|
|||||||
channel: body.channel().map_err(Error::parse)?.to_string(),
|
channel: body.channel().map_err(Error::parse)?.to_string(),
|
||||||
payload: body.message().map_err(Error::parse)?.to_string(),
|
payload: body.message().map_err(Error::parse)?.to_string(),
|
||||||
};
|
};
|
||||||
return Poll::Ready(Ok(AsyncMessage::Notification(notification)));
|
return Ok(Some(AsyncMessage::Notification(notification)));
|
||||||
}
|
}
|
||||||
BackendMessage::Async(Message::ParameterStatus(body)) => {
|
BackendMessage::Async(Message::ParameterStatus(body)) => {
|
||||||
self.parameters.insert(
|
self.parameters.insert(
|
||||||
@@ -138,10 +139,8 @@ where
|
|||||||
let mut response = match self.responses.pop_front() {
|
let mut response = match self.responses.pop_front() {
|
||||||
Some(response) => response,
|
Some(response) => response,
|
||||||
None => match messages.next().map_err(Error::parse)? {
|
None => match messages.next().map_err(Error::parse)? {
|
||||||
Some(Message::ErrorResponse(error)) => {
|
Some(Message::ErrorResponse(error)) => return Err(Error::db(error)),
|
||||||
return Poll::Ready(Err(Error::db(error)))
|
_ => return Err(Error::unexpected_message()),
|
||||||
}
|
|
||||||
_ => return Poll::Ready(Err(Error::unexpected_message())),
|
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -165,14 +164,18 @@ where
|
|||||||
request_complete,
|
request_complete,
|
||||||
});
|
});
|
||||||
trace!("poll_read: waiting on sender");
|
trace!("poll_read: waiting on sender");
|
||||||
return Poll::Pending;
|
return Ok(None);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Fetch the next client request and enqueue the response sender.
|
|
||||||
fn poll_request(&mut self, cx: &mut Context<'_>) -> Poll<Option<RequestMessages>> {
|
fn poll_request(&mut self, cx: &mut Context<'_>) -> Poll<Option<RequestMessages>> {
|
||||||
|
if let Some(messages) = self.pending_request.take() {
|
||||||
|
trace!("retrying pending request");
|
||||||
|
return Poll::Ready(Some(messages));
|
||||||
|
}
|
||||||
|
|
||||||
if self.receiver.is_closed() {
|
if self.receiver.is_closed() {
|
||||||
return Poll::Ready(None);
|
return Poll::Ready(None);
|
||||||
}
|
}
|
||||||
@@ -190,80 +193,74 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Process client requests and write them to the postgres connection, flushing if necessary.
|
fn poll_write(&mut self, cx: &mut Context<'_>) -> Result<bool, Error> {
|
||||||
/// client -> postgres
|
|
||||||
fn poll_write(&mut self, cx: &mut Context<'_>) -> Poll<Result<WriteReady, Error>> {
|
|
||||||
loop {
|
loop {
|
||||||
|
if self.state == State::Closing {
|
||||||
|
trace!("poll_write: done");
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
if Pin::new(&mut self.stream)
|
if Pin::new(&mut self.stream)
|
||||||
.poll_ready(cx)
|
.poll_ready(cx)
|
||||||
.map_err(Error::io)?
|
.map_err(Error::io)?
|
||||||
.is_pending()
|
.is_pending()
|
||||||
{
|
{
|
||||||
trace!("poll_write: waiting on socket");
|
trace!("poll_write: waiting on socket");
|
||||||
|
return Ok(false);
|
||||||
// poll_ready is self-flushing.
|
|
||||||
return Poll::Pending;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
match self.poll_request(cx) {
|
let request = match self.poll_request(cx) {
|
||||||
// send the message to postgres
|
Poll::Ready(Some(request)) => request,
|
||||||
Poll::Ready(Some(RequestMessages::Single(request))) => {
|
Poll::Ready(None) if self.responses.is_empty() && self.state == State::Active => {
|
||||||
Pin::new(&mut self.stream)
|
|
||||||
.start_send(request)
|
|
||||||
.map_err(Error::io)?;
|
|
||||||
}
|
|
||||||
// No more messages from the client, and no more responses to wait for.
|
|
||||||
// Send a terminate message to postgres
|
|
||||||
Poll::Ready(None) if self.responses.is_empty() => {
|
|
||||||
trace!("poll_write: at eof, terminating");
|
trace!("poll_write: at eof, terminating");
|
||||||
|
self.state = State::Terminating;
|
||||||
let mut request = BytesMut::new();
|
let mut request = BytesMut::new();
|
||||||
frontend::terminate(&mut request);
|
frontend::terminate(&mut request);
|
||||||
let request = FrontendMessage::Raw(request.freeze());
|
RequestMessages::Single(FrontendMessage::Raw(request.freeze()))
|
||||||
|
|
||||||
Pin::new(&mut self.stream)
|
|
||||||
.start_send(request)
|
|
||||||
.map_err(Error::io)?;
|
|
||||||
|
|
||||||
trace!("poll_write: sent eof, closing");
|
|
||||||
trace!("poll_write: done");
|
|
||||||
return Poll::Ready(Ok(WriteReady::Terminating));
|
|
||||||
}
|
}
|
||||||
// No more messages from the client, but there are still some responses to wait for.
|
|
||||||
Poll::Ready(None) => {
|
Poll::Ready(None) => {
|
||||||
trace!(
|
trace!(
|
||||||
"poll_write: at eof, pending responses {}",
|
"poll_write: at eof, pending responses {}",
|
||||||
self.responses.len()
|
self.responses.len()
|
||||||
);
|
);
|
||||||
ready!(self.poll_flush(cx))?;
|
return Ok(true);
|
||||||
return Poll::Ready(Ok(WriteReady::WaitingOnRead));
|
|
||||||
}
|
}
|
||||||
// Still waiting for a message from the client.
|
|
||||||
Poll::Pending => {
|
Poll::Pending => {
|
||||||
trace!("poll_write: waiting on request");
|
trace!("poll_write: waiting on request");
|
||||||
ready!(self.poll_flush(cx))?;
|
return Ok(true);
|
||||||
return Poll::Pending;
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match request {
|
||||||
|
RequestMessages::Single(request) => {
|
||||||
|
Pin::new(&mut self.stream)
|
||||||
|
.start_send(request)
|
||||||
|
.map_err(Error::io)?;
|
||||||
|
if self.state == State::Terminating {
|
||||||
|
trace!("poll_write: sent eof, closing");
|
||||||
|
self.state = State::Closing;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
|
fn poll_flush(&mut self, cx: &mut Context<'_>) -> Result<(), Error> {
|
||||||
match Pin::new(&mut self.stream)
|
match Pin::new(&mut self.stream)
|
||||||
.poll_flush(cx)
|
.poll_flush(cx)
|
||||||
.map_err(Error::io)?
|
.map_err(Error::io)?
|
||||||
{
|
{
|
||||||
Poll::Ready(()) => {
|
Poll::Ready(()) => trace!("poll_flush: flushed"),
|
||||||
trace!("poll_flush: flushed");
|
Poll::Pending => trace!("poll_flush: waiting on socket"),
|
||||||
Poll::Ready(Ok(()))
|
|
||||||
}
|
|
||||||
Poll::Pending => {
|
|
||||||
trace!("poll_flush: waiting on socket");
|
|
||||||
Poll::Pending
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
|
fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
|
||||||
|
if self.state != State::Closing {
|
||||||
|
return Poll::Pending;
|
||||||
|
}
|
||||||
|
|
||||||
match Pin::new(&mut self.stream)
|
match Pin::new(&mut self.stream)
|
||||||
.poll_close(cx)
|
.poll_close(cx)
|
||||||
.map_err(Error::io)?
|
.map_err(Error::io)?
|
||||||
@@ -292,30 +289,18 @@ where
|
|||||||
&mut self,
|
&mut self,
|
||||||
cx: &mut Context<'_>,
|
cx: &mut Context<'_>,
|
||||||
) -> Poll<Option<Result<AsyncMessage, Error>>> {
|
) -> Poll<Option<Result<AsyncMessage, Error>>> {
|
||||||
if self.state != State::Closing {
|
let message = self.poll_read(cx)?;
|
||||||
// if the state is still active, try read from and write to postgres.
|
let want_flush = self.poll_write(cx)?;
|
||||||
let message = self.poll_read(cx)?;
|
if want_flush {
|
||||||
let closing = self.poll_write(cx)?;
|
self.poll_flush(cx)?;
|
||||||
if let Poll::Ready(WriteReady::Terminating) = closing {
|
|
||||||
self.state = State::Closing;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Poll::Ready(message) = message {
|
|
||||||
return Poll::Ready(Some(Ok(message)));
|
|
||||||
}
|
|
||||||
|
|
||||||
// poll_read returned Pending.
|
|
||||||
// poll_write returned Pending or Ready(WriteReady::WaitingOnRead).
|
|
||||||
// if poll_write returned Ready(WriteReady::WaitingOnRead), then we are waiting to read more data from postgres.
|
|
||||||
if self.state != State::Closing {
|
|
||||||
return Poll::Pending;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
match message {
|
||||||
match self.poll_shutdown(cx) {
|
Some(message) => Poll::Ready(Some(Ok(message))),
|
||||||
Poll::Ready(Ok(())) => Poll::Ready(None),
|
None => match self.poll_shutdown(cx) {
|
||||||
Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))),
|
Poll::Ready(Ok(())) => Poll::Ready(None),
|
||||||
Poll::Pending => Poll::Pending,
|
Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))),
|
||||||
|
Poll::Pending => Poll::Pending,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
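Editor's note: the helpers above drop their `Poll` return types: `poll_read` reports an optional async message, `poll_write` reports whether a flush is wanted, and `poll_next` composes them. A minimal sketch of that composition with stand-in types (the real `poll_next` hands the "no message" case to `poll_shutdown`, which only completes once the state machine has reached `Closing`):

```rust
use std::task::{Context, Poll};

struct PumpError;
struct Pump;

impl Pump {
    // client <- postgres: Ok(None) means "no progress right now" (waker registered)
    fn poll_read(&mut self, _cx: &mut Context<'_>) -> Result<Option<u8>, PumpError> {
        Ok(None)
    }
    // client -> postgres: Ok(true) means "something was queued, please flush"
    fn poll_write(&mut self, _cx: &mut Context<'_>) -> Result<bool, PumpError> {
        Ok(false)
    }
    fn poll_flush(&mut self, _cx: &mut Context<'_>) -> Result<(), PumpError> {
        Ok(())
    }

    fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<u8, PumpError>>> {
        match self.drive(cx) {
            Ok(Some(msg)) => Poll::Ready(Some(Ok(msg))),
            Ok(None) => Poll::Pending, // wakeups were registered inside the helpers
            Err(e) => Poll::Ready(Some(Err(e))),
        }
    }

    fn drive(&mut self, cx: &mut Context<'_>) -> Result<Option<u8>, PumpError> {
        let message = self.poll_read(cx)?; // one read attempt per poll
        if self.poll_write(cx)? {
            self.poll_flush(cx)?; // flush only if the write half asked for it
        }
        Ok(message)
    }
}
```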
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use crate::query::RowStream;
|
use crate::query::{self, RowStream};
|
||||||
use crate::types::Type;
|
use crate::types::Type;
|
||||||
use crate::{Client, Error, Transaction};
|
use crate::{Client, Error, Transaction};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
@@ -13,33 +13,32 @@ mod private {
|
|||||||
/// This trait is "sealed", and cannot be implemented outside of this crate.
|
/// This trait is "sealed", and cannot be implemented outside of this crate.
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait GenericClient: private::Sealed {
|
pub trait GenericClient: private::Sealed {
|
||||||
/// Like `Client::query_raw_txt`.
|
async fn query_raw_txt<S, I>(&mut self, statement: &str, params: I) -> Result<RowStream, Error>
|
||||||
async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
|
|
||||||
where
|
where
|
||||||
S: AsRef<str> + Sync + Send,
|
S: AsRef<str> + Sync + Send,
|
||||||
I: IntoIterator<Item = Option<S>> + Sync + Send,
|
I: IntoIterator<Item = Option<S>> + Sync + Send,
|
||||||
I::IntoIter: ExactSizeIterator + Sync + Send;
|
I::IntoIter: ExactSizeIterator + Sync + Send;
|
||||||
|
|
||||||
/// Query for type information
|
/// Query for type information
|
||||||
async fn get_type(&self, oid: Oid) -> Result<Type, Error>;
|
async fn get_type(&mut self, oid: Oid) -> Result<Type, Error>;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl private::Sealed for Client {}
|
impl private::Sealed for Client {}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl GenericClient for Client {
|
impl GenericClient for Client {
|
||||||
async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
|
async fn query_raw_txt<S, I>(&mut self, statement: &str, params: I) -> Result<RowStream, Error>
|
||||||
where
|
where
|
||||||
S: AsRef<str> + Sync + Send,
|
S: AsRef<str> + Sync + Send,
|
||||||
I: IntoIterator<Item = Option<S>> + Sync + Send,
|
I: IntoIterator<Item = Option<S>> + Sync + Send,
|
||||||
I::IntoIter: ExactSizeIterator + Sync + Send,
|
I::IntoIter: ExactSizeIterator + Sync + Send,
|
||||||
{
|
{
|
||||||
self.query_raw_txt(statement, params).await
|
query::query_txt(&mut self.inner, statement, params).await
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Query for type information
|
/// Query for type information
|
||||||
async fn get_type(&self, oid: Oid) -> Result<Type, Error> {
|
async fn get_type(&mut self, oid: Oid) -> Result<Type, Error> {
|
||||||
self.get_type(oid).await
|
crate::prepare::get_type(&mut self.inner, &mut self.cached_typeinfo, oid).await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -48,17 +47,18 @@ impl private::Sealed for Transaction<'_> {}
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
#[allow(clippy::needless_lifetimes)]
|
#[allow(clippy::needless_lifetimes)]
|
||||||
impl GenericClient for Transaction<'_> {
|
impl GenericClient for Transaction<'_> {
|
||||||
async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
|
async fn query_raw_txt<S, I>(&mut self, statement: &str, params: I) -> Result<RowStream, Error>
|
||||||
where
|
where
|
||||||
S: AsRef<str> + Sync + Send,
|
S: AsRef<str> + Sync + Send,
|
||||||
I: IntoIterator<Item = Option<S>> + Sync + Send,
|
I: IntoIterator<Item = Option<S>> + Sync + Send,
|
||||||
I::IntoIter: ExactSizeIterator + Sync + Send,
|
I::IntoIter: ExactSizeIterator + Sync + Send,
|
||||||
{
|
{
|
||||||
self.query_raw_txt(statement, params).await
|
query::query_txt(&mut self.client().inner, statement, params).await
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Query for type information
|
/// Query for type information
|
||||||
async fn get_type(&self, oid: Oid) -> Result<Type, Error> {
|
async fn get_type(&mut self, oid: Oid) -> Result<Type, Error> {
|
||||||
self.client().get_type(oid).await
|
let client = self.client();
|
||||||
|
crate::prepare::get_type(&mut client.inner, &mut client.cached_typeinfo, oid).await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
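Editor's note: both implementations now forward to `query::query_txt` with a mutable borrow of the inner client, so generic callers take `&mut C`. A hedged sketch written against the trait; only `query_raw_txt`, `Row`, and `Error` are taken from the diff above, the rest is illustrative:

```rust
use futures_util::StreamExt;

// Counts result rows; works for both Client and Transaction<'_> via GenericClient.
async fn count_rows<C: GenericClient>(client: &mut C, sql: &str) -> Result<usize, Error> {
    // no parameters: an empty iterator still satisfies the ExactSizeIterator bound
    let rows = client
        .query_raw_txt(sql, std::iter::empty::<Option<String>>())
        .await?;
    futures_util::pin_mut!(rows); // pin before iterating (the crate's row streams use PhantomPinned)

    let mut n = 0;
    while let Some(row) = rows.next().await {
        row?; // surface any mid-stream error
        n += 1;
    }
    Ok(n)
}
```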
|||||||
@@ -1,5 +1,5 @@
|
|||||||
//! An asynchronous, pipelined, PostgreSQL client.
|
//! An asynchronous, pipelined, PostgreSQL client.
|
||||||
#![warn(clippy::all)]
|
#![warn(rust_2018_idioms, clippy::all)]
|
||||||
|
|
||||||
pub use crate::cancel_token::CancelToken;
|
pub use crate::cancel_token::CancelToken;
|
||||||
pub use crate::client::{Client, SocketConfig};
|
pub use crate::client::{Client, SocketConfig};
|
||||||
@@ -10,11 +10,10 @@ use crate::error::DbError;
|
|||||||
pub use crate::error::Error;
|
pub use crate::error::Error;
|
||||||
pub use crate::generic_client::GenericClient;
|
pub use crate::generic_client::GenericClient;
|
||||||
pub use crate::query::RowStream;
|
pub use crate::query::RowStream;
|
||||||
pub use crate::row::{Row, SimpleQueryRow};
|
pub use crate::row::Row;
|
||||||
pub use crate::simple_query::SimpleQueryStream;
|
|
||||||
pub use crate::statement::{Column, Statement};
|
pub use crate::statement::{Column, Statement};
|
||||||
pub use crate::tls::NoTls;
|
pub use crate::tls::NoTls;
|
||||||
pub use crate::to_statement::ToStatement;
|
// pub use crate::to_statement::ToStatement;
|
||||||
pub use crate::transaction::Transaction;
|
pub use crate::transaction::Transaction;
|
||||||
pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder};
|
pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder};
|
||||||
use crate::types::ToSql;
|
use crate::types::ToSql;
|
||||||
@@ -65,7 +64,7 @@ pub mod row;
|
|||||||
mod simple_query;
|
mod simple_query;
|
||||||
mod statement;
|
mod statement;
|
||||||
pub mod tls;
|
pub mod tls;
|
||||||
mod to_statement;
|
// mod to_statement;
|
||||||
mod transaction;
|
mod transaction;
|
||||||
mod transaction_builder;
|
mod transaction_builder;
|
||||||
pub mod types;
|
pub mod types;
|
||||||
@@ -98,7 +97,6 @@ impl Notification {
|
|||||||
/// An asynchronous message from the server.
|
/// An asynchronous message from the server.
|
||||||
#[allow(clippy::large_enum_variant)]
|
#[allow(clippy::large_enum_variant)]
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
#[non_exhaustive]
|
|
||||||
pub enum AsyncMessage {
|
pub enum AsyncMessage {
|
||||||
/// A notice.
|
/// A notice.
|
||||||
///
|
///
|
||||||
@@ -110,18 +108,6 @@ pub enum AsyncMessage {
|
|||||||
Notification(Notification),
|
Notification(Notification),
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Message returned by the `SimpleQuery` stream.
|
|
||||||
#[derive(Debug)]
|
|
||||||
#[non_exhaustive]
|
|
||||||
pub enum SimpleQueryMessage {
|
|
||||||
/// A row of data.
|
|
||||||
Row(SimpleQueryRow),
|
|
||||||
/// A statement in the query has completed.
|
|
||||||
///
|
|
||||||
/// The number of rows modified or selected is returned.
|
|
||||||
CommandComplete(u64),
|
|
||||||
}
|
|
||||||
|
|
||||||
fn slice_iter<'a>(
|
fn slice_iter<'a>(
|
||||||
s: &'a [&'a (dyn ToSql + Sync)],
|
s: &'a [&'a (dyn ToSql + Sync)],
|
||||||
) -> impl ExactSizeIterator<Item = &'a (dyn ToSql + Sync)> + 'a {
|
) -> impl ExactSizeIterator<Item = &'a (dyn ToSql + Sync)> + 'a {
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use crate::client::InnerClient;
|
use crate::client::{CachedTypeInfo, InnerClient};
|
||||||
use crate::codec::FrontendMessage;
|
use crate::codec::FrontendMessage;
|
||||||
use crate::connection::RequestMessages;
|
use crate::connection::RequestMessages;
|
||||||
use crate::error::SqlState;
|
use crate::error::SqlState;
|
||||||
@@ -7,14 +7,13 @@ use crate::{query, slice_iter};
|
|||||||
use crate::{Column, Error, Statement};
|
use crate::{Column, Error, Statement};
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use fallible_iterator::FallibleIterator;
|
use fallible_iterator::FallibleIterator;
|
||||||
use futures_util::{pin_mut, TryStreamExt};
|
use futures_util::{pin_mut, StreamExt, TryStreamExt};
|
||||||
use log::debug;
|
use log::debug;
|
||||||
use postgres_protocol2::message::backend::Message;
|
use postgres_protocol2::message::backend::Message;
|
||||||
use postgres_protocol2::message::frontend;
|
use postgres_protocol2::message::frontend;
|
||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::pin::Pin;
|
use std::pin::{pin, Pin};
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
pub(crate) const TYPEINFO_QUERY: &str = "\
|
pub(crate) const TYPEINFO_QUERY: &str = "\
|
||||||
SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid
|
SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid
|
||||||
@@ -59,7 +58,8 @@ ORDER BY attnum
|
|||||||
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
|
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
|
||||||
|
|
||||||
pub async fn prepare(
|
pub async fn prepare(
|
||||||
client: &Arc<InnerClient>,
|
client: &mut InnerClient,
|
||||||
|
cache: &mut CachedTypeInfo,
|
||||||
query: &str,
|
query: &str,
|
||||||
types: &[Type],
|
types: &[Type],
|
||||||
) -> Result<Statement, Error> {
|
) -> Result<Statement, Error> {
|
||||||
@@ -86,7 +86,7 @@ pub async fn prepare(
|
|||||||
let mut parameters = vec![];
|
let mut parameters = vec![];
|
||||||
let mut it = parameter_description.parameters();
|
let mut it = parameter_description.parameters();
|
||||||
while let Some(oid) = it.next().map_err(Error::parse)? {
|
while let Some(oid) = it.next().map_err(Error::parse)? {
|
||||||
let type_ = get_type(client, oid).await?;
|
let type_ = get_type(client, cache, oid).await?;
|
||||||
parameters.push(type_);
|
parameters.push(type_);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -94,24 +94,30 @@ pub async fn prepare(
|
|||||||
if let Some(row_description) = row_description {
|
if let Some(row_description) = row_description {
|
||||||
let mut it = row_description.fields();
|
let mut it = row_description.fields();
|
||||||
while let Some(field) = it.next().map_err(Error::parse)? {
|
while let Some(field) = it.next().map_err(Error::parse)? {
|
||||||
let type_ = get_type(client, field.type_oid()).await?;
|
let type_ = get_type(client, cache, field.type_oid()).await?;
|
||||||
let column = Column::new(field.name().to_string(), type_, field);
|
let column = Column::new(field.name().to_string(), type_, field);
|
||||||
columns.push(column);
|
columns.push(column);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Statement::new(client, name, parameters, columns))
|
Ok(Statement::new(name, parameters, columns))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn prepare_rec<'a>(
|
fn prepare_rec<'a>(
|
||||||
client: &'a Arc<InnerClient>,
|
client: &'a mut InnerClient,
|
||||||
|
cache: &'a mut CachedTypeInfo,
|
||||||
query: &'a str,
|
query: &'a str,
|
||||||
types: &'a [Type],
|
types: &'a [Type],
|
||||||
) -> Pin<Box<dyn Future<Output = Result<Statement, Error>> + 'a + Send>> {
|
) -> Pin<Box<dyn Future<Output = Result<Statement, Error>> + 'a + Send>> {
|
||||||
Box::pin(prepare(client, query, types))
|
Box::pin(prepare(client, cache, query, types))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Result<Bytes, Error> {
|
fn encode(
|
||||||
|
client: &mut InnerClient,
|
||||||
|
name: &str,
|
||||||
|
query: &str,
|
||||||
|
types: &[Type],
|
||||||
|
) -> Result<Bytes, Error> {
|
||||||
if types.is_empty() {
|
if types.is_empty() {
|
||||||
debug!("preparing query {}: {}", name, query);
|
debug!("preparing query {}: {}", name, query);
|
||||||
} else {
|
} else {
|
||||||
@@ -126,16 +132,20 @@ fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Resu
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_type(client: &Arc<InnerClient>, oid: Oid) -> Result<Type, Error> {
|
pub async fn get_type(
|
||||||
|
client: &mut InnerClient,
|
||||||
|
cache: &mut CachedTypeInfo,
|
||||||
|
oid: Oid,
|
||||||
|
) -> Result<Type, Error> {
|
||||||
if let Some(type_) = Type::from_oid(oid) {
|
if let Some(type_) = Type::from_oid(oid) {
|
||||||
return Ok(type_);
|
return Ok(type_);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(type_) = client.type_(oid) {
|
if let Some(type_) = cache.type_(oid) {
|
||||||
return Ok(type_);
|
return Ok(type_);
|
||||||
}
|
}
|
||||||
|
|
||||||
let stmt = typeinfo_statement(client).await?;
|
let stmt = typeinfo_statement(client, cache).await?;
|
||||||
|
|
||||||
let rows = query::query(client, stmt, slice_iter(&[&oid])).await?;
|
let rows = query::query(client, stmt, slice_iter(&[&oid])).await?;
|
||||||
pin_mut!(rows);
|
pin_mut!(rows);
|
||||||
@@ -145,118 +155,141 @@ pub async fn get_type(client: &Arc<InnerClient>, oid: Oid) -> Result<Type, Error
|
|||||||
None => return Err(Error::unexpected_message()),
|
None => return Err(Error::unexpected_message()),
|
||||||
};
|
};
|
||||||
|
|
||||||
let name: String = row.try_get(0)?;
|
let name: String = row.try_get(stmt.columns(), 0)?;
|
||||||
let type_: i8 = row.try_get(1)?;
|
let type_: i8 = row.try_get(stmt.columns(), 1)?;
|
||||||
let elem_oid: Oid = row.try_get(2)?;
|
let elem_oid: Oid = row.try_get(stmt.columns(), 2)?;
|
||||||
let rngsubtype: Option<Oid> = row.try_get(3)?;
|
let rngsubtype: Option<Oid> = row.try_get(stmt.columns(), 3)?;
|
||||||
let basetype: Oid = row.try_get(4)?;
|
let basetype: Oid = row.try_get(stmt.columns(), 4)?;
|
||||||
let schema: String = row.try_get(5)?;
|
let schema: String = row.try_get(stmt.columns(), 5)?;
|
||||||
let relid: Oid = row.try_get(6)?;
|
let relid: Oid = row.try_get(stmt.columns(), 6)?;
|
||||||
|
|
||||||
let kind = if type_ == b'e' as i8 {
|
let kind = if type_ == b'e' as i8 {
|
||||||
let variants = get_enum_variants(client, oid).await?;
|
let variants = get_enum_variants(client, cache, oid).await?;
|
||||||
Kind::Enum(variants)
|
Kind::Enum(variants)
|
||||||
} else if type_ == b'p' as i8 {
|
} else if type_ == b'p' as i8 {
|
||||||
Kind::Pseudo
|
Kind::Pseudo
|
||||||
} else if basetype != 0 {
|
} else if basetype != 0 {
|
||||||
let type_ = get_type_rec(client, basetype).await?;
|
let type_ = get_type_rec(client, cache, basetype).await?;
|
||||||
Kind::Domain(type_)
|
Kind::Domain(type_)
|
||||||
} else if elem_oid != 0 {
|
} else if elem_oid != 0 {
|
||||||
let type_ = get_type_rec(client, elem_oid).await?;
|
let type_ = get_type_rec(client, cache, elem_oid).await?;
|
||||||
Kind::Array(type_)
|
Kind::Array(type_)
|
||||||
} else if relid != 0 {
|
} else if relid != 0 {
|
||||||
let fields = get_composite_fields(client, relid).await?;
|
let fields = get_composite_fields(client, cache, relid).await?;
|
||||||
Kind::Composite(fields)
|
Kind::Composite(fields)
|
||||||
} else if let Some(rngsubtype) = rngsubtype {
|
} else if let Some(rngsubtype) = rngsubtype {
|
||||||
let type_ = get_type_rec(client, rngsubtype).await?;
|
let type_ = get_type_rec(client, cache, rngsubtype).await?;
|
||||||
Kind::Range(type_)
|
Kind::Range(type_)
|
||||||
} else {
|
} else {
|
||||||
Kind::Simple
|
Kind::Simple
|
||||||
};
|
};
|
||||||
|
|
||||||
let type_ = Type::new(name, oid, kind, schema);
|
let type_ = Type::new(name, oid, kind, schema);
|
||||||
client.set_type(oid, &type_);
|
cache.set_type(oid, &type_);
|
||||||
|
|
||||||
Ok(type_)
|
Ok(type_)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_type_rec<'a>(
|
fn get_type_rec<'a>(
|
||||||
client: &'a Arc<InnerClient>,
|
client: &'a mut InnerClient,
|
||||||
|
cache: &'a mut CachedTypeInfo,
|
||||||
oid: Oid,
|
oid: Oid,
|
||||||
) -> Pin<Box<dyn Future<Output = Result<Type, Error>> + Send + 'a>> {
|
) -> Pin<Box<dyn Future<Output = Result<Type, Error>> + Send + 'a>> {
|
||||||
Box::pin(get_type(client, oid))
|
Box::pin(get_type(client, cache, oid))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn typeinfo_statement(client: &Arc<InnerClient>) -> Result<Statement, Error> {
|
async fn typeinfo_statement<'c>(
|
||||||
if let Some(stmt) = client.typeinfo() {
|
client: &mut InnerClient,
|
||||||
return Ok(stmt);
|
cache: &'c mut CachedTypeInfo,
|
||||||
|
) -> Result<&'c Statement, Error> {
|
||||||
|
if cache.typeinfo().is_some() {
|
||||||
|
// needed to get around a borrow checker limitation
|
||||||
|
return Ok(cache.typeinfo().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
let stmt = match prepare_rec(client, TYPEINFO_QUERY, &[]).await {
|
let stmt = match prepare_rec(client, cache, TYPEINFO_QUERY, &[]).await {
|
||||||
Ok(stmt) => stmt,
|
Ok(stmt) => stmt,
|
||||||
Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => {
|
Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => {
|
||||||
prepare_rec(client, TYPEINFO_FALLBACK_QUERY, &[]).await?
|
prepare_rec(client, cache, TYPEINFO_FALLBACK_QUERY, &[]).await?
|
||||||
}
|
}
|
||||||
Err(e) => return Err(e),
|
Err(e) => return Err(e),
|
||||||
};
|
};
|
||||||
|
|
||||||
client.set_typeinfo(&stmt);
|
Ok(cache.set_typeinfo(stmt))
|
||||||
Ok(stmt)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_enum_variants(client: &Arc<InnerClient>, oid: Oid) -> Result<Vec<String>, Error> {
|
async fn get_enum_variants(
|
||||||
let stmt = typeinfo_enum_statement(client).await?;
|
client: &mut InnerClient,
|
||||||
|
cache: &mut CachedTypeInfo,
|
||||||
|
oid: Oid,
|
||||||
|
) -> Result<Vec<String>, Error> {
|
||||||
|
let stmt = typeinfo_enum_statement(client, cache).await?;
|
||||||
|
|
||||||
query::query(client, stmt, slice_iter(&[&oid]))
|
let mut out = vec![];
|
||||||
.await?
|
|
||||||
.and_then(|row| async move { row.try_get(0) })
|
let mut rows = pin!(query::query(client, stmt, slice_iter(&[&oid])).await?);
|
||||||
.try_collect()
|
while let Some(row) = rows.next().await {
|
||||||
.await
|
out.push(row?.try_get(stmt.columns(), 0)?)
|
||||||
|
}
|
||||||
|
Ok(out)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn typeinfo_enum_statement(client: &Arc<InnerClient>) -> Result<Statement, Error> {
|
async fn typeinfo_enum_statement<'c>(
|
||||||
if let Some(stmt) = client.typeinfo_enum() {
|
client: &mut InnerClient,
|
||||||
return Ok(stmt);
|
cache: &'c mut CachedTypeInfo,
|
||||||
|
) -> Result<&'c Statement, Error> {
|
||||||
|
if cache.typeinfo_enum().is_some() {
|
||||||
|
// needed to get around a borrow checker limitation
|
||||||
|
return Ok(cache.typeinfo_enum().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
let stmt = match prepare_rec(client, TYPEINFO_ENUM_QUERY, &[]).await {
|
let stmt = match prepare_rec(client, cache, TYPEINFO_ENUM_QUERY, &[]).await {
|
||||||
Ok(stmt) => stmt,
|
Ok(stmt) => stmt,
|
||||||
Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => {
|
Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => {
|
||||||
prepare_rec(client, TYPEINFO_ENUM_FALLBACK_QUERY, &[]).await?
|
prepare_rec(client, cache, TYPEINFO_ENUM_FALLBACK_QUERY, &[]).await?
|
||||||
}
|
}
|
||||||
Err(e) => return Err(e),
|
Err(e) => return Err(e),
|
||||||
};
|
};
|
||||||
|
|
||||||
client.set_typeinfo_enum(&stmt);
|
Ok(cache.set_typeinfo_enum(stmt))
|
||||||
Ok(stmt)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_composite_fields(client: &Arc<InnerClient>, oid: Oid) -> Result<Vec<Field>, Error> {
|
async fn get_composite_fields(
|
||||||
let stmt = typeinfo_composite_statement(client).await?;
|
client: &mut InnerClient,
|
||||||
|
cache: &mut CachedTypeInfo,
|
||||||
|
oid: Oid,
|
||||||
|
) -> Result<Vec<Field>, Error> {
|
||||||
|
let stmt = typeinfo_composite_statement(client, cache).await?;
|
||||||
|
|
||||||
let rows = query::query(client, stmt, slice_iter(&[&oid]))
|
let mut rows = pin!(query::query(client, stmt, slice_iter(&[&oid])).await?);
|
||||||
.await?
|
|
||||||
.try_collect::<Vec<_>>()
|
let mut oids = vec![];
|
||||||
.await?;
|
while let Some(row) = rows.next().await {
|
||||||
|
let row = row?;
|
||||||
|
let name = row.try_get(stmt.columns(), 0)?;
|
||||||
|
let oid = row.try_get(stmt.columns(), 1)?;
|
||||||
|
oids.push((name, oid));
|
||||||
|
}
|
||||||
|
|
||||||
let mut fields = vec![];
|
let mut fields = vec![];
|
||||||
for row in rows {
|
for (name, oid) in oids {
|
||||||
let name = row.try_get(0)?;
|
let type_ = get_type_rec(client, cache, oid).await?;
|
||||||
let oid = row.try_get(1)?;
|
|
||||||
let type_ = get_type_rec(client, oid).await?;
|
|
||||||
fields.push(Field::new(name, type_));
|
fields.push(Field::new(name, type_));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(fields)
|
Ok(fields)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn typeinfo_composite_statement(client: &Arc<InnerClient>) -> Result<Statement, Error> {
|
async fn typeinfo_composite_statement<'c>(
|
||||||
if let Some(stmt) = client.typeinfo_composite() {
|
client: &mut InnerClient,
|
||||||
return Ok(stmt);
|
cache: &'c mut CachedTypeInfo,
|
||||||
|
) -> Result<&'c Statement, Error> {
|
||||||
|
if cache.typeinfo_composite().is_some() {
|
||||||
|
// needed to get around a borrow checker limitation
|
||||||
|
return Ok(cache.typeinfo_composite().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
let stmt = prepare_rec(client, TYPEINFO_COMPOSITE_QUERY, &[]).await?;
|
let stmt = prepare_rec(client, cache, TYPEINFO_COMPOSITE_QUERY, &[]).await?;
|
||||||
|
|
||||||
client.set_typeinfo_composite(&stmt);
|
Ok(cache.set_typeinfo_composite(stmt))
|
||||||
Ok(stmt)
|
|
||||||
}
|
}
|
||||||
|
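Editor's note: the `is_some()` followed by `unwrap()` shape in the statement-cache helpers above (flagged by the in-code comments) works around a known borrow-checker limitation: returning a borrow out of an `if let` arm keeps the cache borrowed for the rest of the function, which then rejects the later mutable use. A hedged stand-in sketch of the accepted form:

```rust
struct Cache {
    stmt: Option<String>, // stands in for Option<Statement>
}

fn typeinfo<'c>(cache: &'c mut Cache) -> &'c String {
    // `if let Some(s) = cache.stmt.as_ref() { return s; }` is rejected by the
    // current (non-Polonius) borrow checker once the function also mutates
    // `cache` below, so check first and re-borrow inside the branch instead.
    if cache.stmt.is_some() {
        return cache.stmt.as_ref().unwrap();
    }
    cache.stmt = Some("prepared typeinfo statement".to_string()); // prepare_rec(..) in the real code
    cache.stmt.as_ref().unwrap()
}
```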
|||||||
@@ -14,7 +14,6 @@ use postgres_types2::{Format, ToSql, Type};
|
|||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::marker::PhantomPinned;
|
use std::marker::PhantomPinned;
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::sync::Arc;
|
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
|
|
||||||
struct BorrowToSqlParamsDebug<'a>(&'a [&'a (dyn ToSql + Sync)]);
|
struct BorrowToSqlParamsDebug<'a>(&'a [&'a (dyn ToSql + Sync)]);
|
||||||
@@ -26,10 +25,10 @@ impl fmt::Debug for BorrowToSqlParamsDebug<'_> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn query<'a, I>(
|
pub async fn query<'a, I>(
|
||||||
client: &InnerClient,
|
client: &mut InnerClient,
|
||||||
statement: Statement,
|
statement: &Statement,
|
||||||
params: I,
|
params: I,
|
||||||
) -> Result<RowStream, Error>
|
) -> Result<RawRowStream, Error>
|
||||||
where
|
where
|
||||||
I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
|
I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
|
||||||
I::IntoIter: ExactSizeIterator,
|
I::IntoIter: ExactSizeIterator,
|
||||||
@@ -41,13 +40,12 @@ where
|
|||||||
statement.name(),
|
statement.name(),
|
||||||
BorrowToSqlParamsDebug(params.as_slice()),
|
BorrowToSqlParamsDebug(params.as_slice()),
|
||||||
);
|
);
|
||||||
encode(client, &statement, params)?
|
encode(client, statement, params)?
|
||||||
} else {
|
} else {
|
||||||
encode(client, &statement, params)?
|
encode(client, statement, params)?
|
||||||
};
|
};
|
||||||
let responses = start(client, buf).await?;
|
let responses = start(client, buf).await?;
|
||||||
Ok(RowStream {
|
Ok(RawRowStream {
|
||||||
statement,
|
|
||||||
responses,
|
responses,
|
||||||
command_tag: None,
|
command_tag: None,
|
||||||
status: ReadyForQueryStatus::Unknown,
|
status: ReadyForQueryStatus::Unknown,
|
||||||
@@ -57,7 +55,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn query_txt<S, I>(
|
pub async fn query_txt<S, I>(
|
||||||
client: &Arc<InnerClient>,
|
client: &mut InnerClient,
|
||||||
query: &str,
|
query: &str,
|
||||||
params: I,
|
params: I,
|
||||||
) -> Result<RowStream, Error>
|
) -> Result<RowStream, Error>
|
||||||
@@ -157,49 +155,6 @@ where
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn execute<'a, I>(
|
|
||||||
client: &InnerClient,
|
|
||||||
statement: Statement,
|
|
||||||
params: I,
|
|
||||||
) -> Result<u64, Error>
|
|
||||||
where
|
|
||||||
I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
|
|
||||||
I::IntoIter: ExactSizeIterator,
|
|
||||||
{
|
|
||||||
let buf = if log_enabled!(Level::Debug) {
|
|
||||||
let params = params.into_iter().collect::<Vec<_>>();
|
|
||||||
debug!(
|
|
||||||
"executing statement {} with parameters: {:?}",
|
|
||||||
statement.name(),
|
|
||||||
BorrowToSqlParamsDebug(params.as_slice()),
|
|
||||||
);
|
|
||||||
encode(client, &statement, params)?
|
|
||||||
} else {
|
|
||||||
encode(client, &statement, params)?
|
|
||||||
};
|
|
||||||
let mut responses = start(client, buf).await?;
|
|
||||||
|
|
||||||
let mut rows = 0;
|
|
||||||
loop {
|
|
||||||
match responses.next().await? {
|
|
||||||
Message::DataRow(_) => {}
|
|
||||||
Message::CommandComplete(body) => {
|
|
||||||
rows = body
|
|
||||||
.tag()
|
|
||||||
.map_err(Error::parse)?
|
|
||||||
.rsplit(' ')
|
|
||||||
.next()
|
|
||||||
.unwrap()
|
|
||||||
.parse()
|
|
||||||
.unwrap_or(0);
|
|
||||||
}
|
|
||||||
Message::EmptyQueryResponse => rows = 0,
|
|
||||||
Message::ReadyForQuery(_) => return Ok(rows),
|
|
||||||
_ => return Err(Error::unexpected_message()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
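Editor's note: the removed `execute` helper above recovered the affected-row count from the `CommandComplete` tag by taking the last whitespace-separated token. A small worked example of that parsing, using tag strings of the form documented by the Postgres protocol:

```rust
fn rows_from_tag(tag: &str) -> u64 {
    // "INSERT 0 5" -> 5, "UPDATE 3" -> 3, "CREATE TABLE" -> 0 (non-numeric tail)
    tag.rsplit(' ').next().unwrap().parse().unwrap_or(0)
}

#[cfg(test)]
mod tests {
    use super::rows_from_tag;

    #[test]
    fn command_complete_tags() {
        assert_eq!(rows_from_tag("INSERT 0 5"), 5);
        assert_eq!(rows_from_tag("UPDATE 3"), 3);
        assert_eq!(rows_from_tag("CREATE TABLE"), 0);
    }
}
```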
async fn start(client: &InnerClient, buf: Bytes) -> Result<Responses, Error> {
|
async fn start(client: &InnerClient, buf: Bytes) -> Result<Responses, Error> {
|
||||||
let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?;
|
let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?;
|
||||||
|
|
||||||
@@ -211,7 +166,11 @@ async fn start(client: &InnerClient, buf: Bytes) -> Result<Responses, Error> {
|
|||||||
Ok(responses)
|
Ok(responses)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn encode<'a, I>(client: &InnerClient, statement: &Statement, params: I) -> Result<Bytes, Error>
|
pub fn encode<'a, I>(
|
||||||
|
client: &mut InnerClient,
|
||||||
|
statement: &Statement,
|
||||||
|
params: I,
|
||||||
|
) -> Result<Bytes, Error>
|
||||||
where
|
where
|
||||||
I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
|
I: IntoIterator<Item = &'a (dyn ToSql + Sync)>,
|
||||||
I::IntoIter: ExactSizeIterator,
|
I::IntoIter: ExactSizeIterator,
|
||||||
@@ -296,11 +255,7 @@ impl Stream for RowStream {
|
|||||||
loop {
|
loop {
|
||||||
match ready!(this.responses.poll_next(cx)?) {
|
match ready!(this.responses.poll_next(cx)?) {
|
||||||
Message::DataRow(body) => {
|
Message::DataRow(body) => {
|
||||||
return Poll::Ready(Some(Ok(Row::new(
|
return Poll::Ready(Some(Ok(Row::new(body, *this.output_format)?)))
|
||||||
this.statement.clone(),
|
|
||||||
body,
|
|
||||||
*this.output_format,
|
|
||||||
)?)))
|
|
||||||
}
|
}
|
||||||
Message::EmptyQueryResponse | Message::PortalSuspended => {}
|
Message::EmptyQueryResponse | Message::PortalSuspended => {}
|
||||||
Message::CommandComplete(body) => {
|
Message::CommandComplete(body) => {
|
||||||
@@ -338,3 +293,41 @@ impl RowStream {
|
|||||||
self.status
|
self.status
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pin_project! {
|
||||||
|
/// A stream of table rows.
|
||||||
|
pub struct RawRowStream {
|
||||||
|
responses: Responses,
|
||||||
|
command_tag: Option<String>,
|
||||||
|
output_format: Format,
|
||||||
|
status: ReadyForQueryStatus,
|
||||||
|
#[pin]
|
||||||
|
_p: PhantomPinned,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Stream for RawRowStream {
|
||||||
|
type Item = Result<Row, Error>;
|
||||||
|
|
||||||
|
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||||
|
let this = self.project();
|
||||||
|
loop {
|
||||||
|
match ready!(this.responses.poll_next(cx)?) {
|
||||||
|
Message::DataRow(body) => {
|
||||||
|
return Poll::Ready(Some(Ok(Row::new(body, *this.output_format)?)))
|
||||||
|
}
|
||||||
|
Message::EmptyQueryResponse | Message::PortalSuspended => {}
|
||||||
|
Message::CommandComplete(body) => {
|
||||||
|
if let Ok(tag) = body.tag() {
|
||||||
|
*this.command_tag = Some(tag.to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Message::ReadyForQuery(status) => {
|
||||||
|
*this.status = status.into();
|
||||||
|
return Poll::Ready(None);
|
||||||
|
}
|
||||||
|
_ => return Poll::Ready(Some(Err(Error::unexpected_message()))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,103 +1,16 @@
|
|||||||
//! Rows.
|
//! Rows.
|
||||||
|
|
||||||
use crate::row::sealed::{AsName, Sealed};
|
|
||||||
use crate::simple_query::SimpleColumn;
|
|
||||||
use crate::statement::Column;
|
use crate::statement::Column;
|
||||||
use crate::types::{FromSql, Type, WrongType};
|
use crate::types::{FromSql, Type, WrongType};
|
||||||
use crate::{Error, Statement};
|
use crate::Error;
|
||||||
use fallible_iterator::FallibleIterator;
|
use fallible_iterator::FallibleIterator;
|
||||||
use postgres_protocol2::message::backend::DataRowBody;
|
use postgres_protocol2::message::backend::DataRowBody;
|
||||||
use postgres_types2::{Format, WrongFormat};
|
use postgres_types2::{Format, WrongFormat};
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::ops::Range;
|
use std::ops::Range;
|
||||||
use std::str;
|
use std::str;
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
mod sealed {
|
|
||||||
pub trait Sealed {}
|
|
||||||
|
|
||||||
pub trait AsName {
|
|
||||||
fn as_name(&self) -> &str;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AsName for Column {
|
|
||||||
fn as_name(&self) -> &str {
|
|
||||||
self.name()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AsName for String {
|
|
||||||
fn as_name(&self) -> &str {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A trait implemented by types that can index into columns of a row.
|
|
||||||
///
|
|
||||||
/// This cannot be implemented outside of this crate.
|
|
||||||
pub trait RowIndex: Sealed {
|
|
||||||
#[doc(hidden)]
|
|
||||||
fn __idx<T>(&self, columns: &[T]) -> Option<usize>
|
|
||||||
where
|
|
||||||
T: AsName;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Sealed for usize {}
|
|
||||||
|
|
||||||
impl RowIndex for usize {
|
|
||||||
#[inline]
|
|
||||||
fn __idx<T>(&self, columns: &[T]) -> Option<usize>
|
|
||||||
where
|
|
||||||
T: AsName,
|
|
||||||
{
|
|
||||||
if *self >= columns.len() {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(*self)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Sealed for str {}
|
|
||||||
|
|
||||||
impl RowIndex for str {
|
|
||||||
#[inline]
|
|
||||||
fn __idx<T>(&self, columns: &[T]) -> Option<usize>
|
|
||||||
where
|
|
||||||
T: AsName,
|
|
||||||
{
|
|
||||||
if let Some(idx) = columns.iter().position(|d| d.as_name() == self) {
|
|
||||||
return Some(idx);
|
|
||||||
};
|
|
||||||
|
|
||||||
// FIXME ASCII-only case insensitivity isn't really the right thing to
|
|
||||||
// do. Postgres itself uses a dubious wrapper around tolower and JDBC
|
|
||||||
// uses the US locale.
|
|
||||||
columns
|
|
||||||
.iter()
|
|
||||||
.position(|d| d.as_name().eq_ignore_ascii_case(self))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Sealed for &T where T: ?Sized + Sealed {}
|
|
||||||
|
|
||||||
impl<T> RowIndex for &T
|
|
||||||
where
|
|
||||||
T: ?Sized + RowIndex,
|
|
||||||
{
|
|
||||||
#[inline]
|
|
||||||
fn __idx<U>(&self, columns: &[U]) -> Option<usize>
|
|
||||||
where
|
|
||||||
U: AsName,
|
|
||||||
{
|
|
||||||
T::__idx(*self, columns)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
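Editor's note: the removed `RowIndex` machinery above resolved string indexes with an exact match first and an ASCII case-insensitive fallback (the FIXME in the original notes this is not locale-correct). A worked example of that lookup rule:

```rust
fn find_column(columns: &[&str], name: &str) -> Option<usize> {
    if let Some(idx) = columns.iter().position(|c| *c == name) {
        return Some(idx); // exact match wins
    }
    // fallback mirrors the removed impl: ASCII-only case folding
    columns.iter().position(|c| c.eq_ignore_ascii_case(name))
}

// find_column(&["id", "UserName"], "username") == Some(1)
```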
/// A row of data returned from the database by a query.
|
/// A row of data returned from the database by a query.
|
||||||
pub struct Row {
|
pub struct Row {
|
||||||
statement: Statement,
|
|
||||||
output_format: Format,
|
output_format: Format,
|
||||||
body: DataRowBody,
|
body: DataRowBody,
|
||||||
ranges: Vec<Option<Range<usize>>>,
|
ranges: Vec<Option<Range<usize>>>,
|
||||||
@@ -105,80 +18,33 @@ pub struct Row {
|
|||||||
|
|
||||||
impl fmt::Debug for Row {
|
impl fmt::Debug for Row {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
f.debug_struct("Row")
|
f.debug_struct("Row").finish()
|
||||||
.field("columns", &self.columns())
|
|
||||||
.finish()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Row {
|
impl Row {
|
||||||
pub(crate) fn new(
|
pub(crate) fn new(
|
||||||
statement: Statement,
|
// statement: Statement,
|
||||||
body: DataRowBody,
|
body: DataRowBody,
|
||||||
output_format: Format,
|
output_format: Format,
|
||||||
) -> Result<Row, Error> {
|
) -> Result<Row, Error> {
|
||||||
let ranges = body.ranges().collect().map_err(Error::parse)?;
|
let ranges = body.ranges().collect().map_err(Error::parse)?;
|
||||||
Ok(Row {
|
Ok(Row {
|
||||||
statement,
|
|
||||||
body,
|
body,
|
||||||
ranges,
|
ranges,
|
||||||
output_format,
|
output_format,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns information about the columns of data in the row.
|
pub(crate) fn try_get<'a, T>(&'a self, columns: &[Column], idx: usize) -> Result<T, Error>
|
||||||
pub fn columns(&self) -> &[Column] {
|
|
||||||
self.statement.columns()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Determines if the row contains no values.
|
|
||||||
pub fn is_empty(&self) -> bool {
|
|
||||||
self.len() == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the number of values in the row.
|
|
||||||
pub fn len(&self) -> usize {
|
|
||||||
self.columns().len()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Deserializes a value from the row.
|
|
||||||
///
|
|
||||||
/// The value can be specified either by its numeric index in the row, or by its column name.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// Panics if the index is out of bounds or if the value cannot be converted to the specified type.
|
|
||||||
pub fn get<'a, I, T>(&'a self, idx: I) -> T
|
|
||||||
where
|
where
|
||||||
I: RowIndex + fmt::Display,
|
|
||||||
T: FromSql<'a>,
|
T: FromSql<'a>,
|
||||||
{
|
{
|
||||||
match self.get_inner(&idx) {
|
let Some(column) = columns.get(idx) else {
|
||||||
Ok(ok) => ok,
|
return Err(Error::column(idx.to_string()));
|
||||||
Err(err) => panic!("error retrieving column {}: {}", idx, err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Like `Row::get`, but returns a `Result` rather than panicking.
|
|
||||||
pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result<T, Error>
|
|
||||||
where
|
|
||||||
I: RowIndex + fmt::Display,
|
|
||||||
T: FromSql<'a>,
|
|
||||||
{
|
|
||||||
self.get_inner(&idx)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result<T, Error>
|
|
||||||
where
|
|
||||||
I: RowIndex + fmt::Display,
|
|
||||||
T: FromSql<'a>,
|
|
||||||
{
|
|
||||||
let idx = match idx.__idx(self.columns()) {
|
|
||||||
Some(idx) => idx,
|
|
||||||
None => return Err(Error::column(idx.to_string())),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let ty = self.columns()[idx].type_();
|
let ty = column.type_();
|
||||||
if !T::accepts(ty) {
|
if !T::accepts(ty) {
|
||||||
return Err(Error::from_sql(
|
return Err(Error::from_sql(
|
||||||
Box::new(WrongType::new::<T>(ty.clone())),
|
Box::new(WrongType::new::<T>(ty.clone())),
|
||||||
@@ -216,85 +82,3 @@ impl Row {
|
|||||||
self.body.buffer().len()
|
self.body.buffer().len()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AsName for SimpleColumn {
|
|
||||||
fn as_name(&self) -> &str {
|
|
||||||
self.name()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A row of data returned from the database by a simple query.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct SimpleQueryRow {
|
|
||||||
columns: Arc<[SimpleColumn]>,
|
|
||||||
body: DataRowBody,
|
|
||||||
ranges: Vec<Option<Range<usize>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SimpleQueryRow {
|
|
||||||
#[allow(clippy::new_ret_no_self)]
|
|
||||||
pub(crate) fn new(
|
|
||||||
columns: Arc<[SimpleColumn]>,
|
|
||||||
body: DataRowBody,
|
|
||||||
) -> Result<SimpleQueryRow, Error> {
|
|
||||||
let ranges = body.ranges().collect().map_err(Error::parse)?;
|
|
||||||
Ok(SimpleQueryRow {
|
|
||||||
columns,
|
|
||||||
body,
|
|
||||||
ranges,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns information about the columns of data in the row.
|
|
||||||
pub fn columns(&self) -> &[SimpleColumn] {
|
|
||||||
&self.columns
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Determines if the row contains no values.
|
|
||||||
pub fn is_empty(&self) -> bool {
|
|
||||||
self.len() == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the number of values in the row.
|
|
||||||
pub fn len(&self) -> usize {
|
|
||||||
self.columns.len()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a value from the row.
|
|
||||||
///
|
|
||||||
/// The value can be specified either by its numeric index in the row, or by its column name.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// Panics if the index is out of bounds or if the value cannot be converted to the specified type.
|
|
||||||
pub fn get<I>(&self, idx: I) -> Option<&str>
|
|
||||||
where
|
|
||||||
I: RowIndex + fmt::Display,
|
|
||||||
{
|
|
||||||
match self.get_inner(&idx) {
|
|
||||||
Ok(ok) => ok,
|
|
||||||
Err(err) => panic!("error retrieving column {}: {}", idx, err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Like `SimpleQueryRow::get`, but returns a `Result` rather than panicking.
|
|
||||||
pub fn try_get<I>(&self, idx: I) -> Result<Option<&str>, Error>
|
|
||||||
where
|
|
||||||
I: RowIndex + fmt::Display,
|
|
||||||
{
|
|
||||||
self.get_inner(&idx)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_inner<I>(&self, idx: &I) -> Result<Option<&str>, Error>
|
|
||||||
where
|
|
||||||
I: RowIndex + fmt::Display,
|
|
||||||
{
|
|
||||||
let idx = match idx.__idx(&self.columns) {
|
|
||||||
Some(idx) => idx,
|
|
||||||
None => return Err(Error::column(idx.to_string())),
|
|
||||||
};
|
|
||||||
|
|
||||||
let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]);
|
|
||||||
FromSql::from_sql_nullable(&Type::TEXT, buf).map_err(|e| Error::from_sql(e, idx))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
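The hunks above drop the panicking, `RowIndex`-based `Row::get` and replace the internal lookup with a bounds-checked `try_get(columns, idx)`. A minimal, self-contained sketch of that `let`-`else` lookup pattern follows; `Column` and `ColumnError` are stand-ins, not the crate's real types.

```rust
// Sketch only: illustrates the bounds-checked lookup used by the new `try_get`.
#[derive(Debug)]
struct Column {
    name: String,
}

#[derive(Debug)]
struct ColumnError(String);

fn column_at(columns: &[Column], idx: usize) -> Result<&Column, ColumnError> {
    // `let`-`else` turns the out-of-bounds case into an early error return
    // instead of a panic, mirroring the change from `get` to `try_get`.
    let Some(column) = columns.get(idx) else {
        return Err(ColumnError(idx.to_string()));
    };
    Ok(column)
}

fn main() {
    let columns = vec![Column { name: "id".to_owned() }];
    assert!(column_at(&columns, 0).is_ok());
    assert!(column_at(&columns, 1).is_err());
}
```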
@@ -1,52 +1,14 @@
-use crate::client::{InnerClient, Responses};
+use crate::client::InnerClient;
use crate::codec::FrontendMessage;
use crate::connection::RequestMessages;
-use crate::{Error, ReadyForQueryStatus, SimpleQueryMessage, SimpleQueryRow};
+use crate::{Error, ReadyForQueryStatus};
use bytes::Bytes;
-use fallible_iterator::FallibleIterator;
-use futures_util::{ready, Stream};
use log::debug;
-use pin_project_lite::pin_project;
use postgres_protocol2::message::backend::Message;
use postgres_protocol2::message::frontend;
-use std::marker::PhantomPinned;
-use std::pin::Pin;
-use std::sync::Arc;
-use std::task::{Context, Poll};
-
-/// Information about a column of a single query row.
-#[derive(Debug)]
-pub struct SimpleColumn {
-    name: String,
-}
-
-impl SimpleColumn {
-    pub(crate) fn new(name: String) -> SimpleColumn {
-        SimpleColumn { name }
-    }
-
-    /// Returns the name of the column.
-    pub fn name(&self) -> &str {
-        &self.name
-    }
-}
-
-pub async fn simple_query(client: &InnerClient, query: &str) -> Result<SimpleQueryStream, Error> {
-    debug!("executing simple query: {}", query);
-
-    let buf = encode(client, query)?;
-    let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?;
-
-    Ok(SimpleQueryStream {
-        responses,
-        columns: None,
-        status: ReadyForQueryStatus::Unknown,
-        _p: PhantomPinned,
-    })
-}
-
pub async fn batch_execute(
-    client: &InnerClient,
+    client: &mut InnerClient,
    query: &str,
) -> Result<ReadyForQueryStatus, Error> {
    debug!("executing statement batch: {}", query);
@@ -66,77 +28,9 @@ pub async fn batch_execute(
    }
}

-pub(crate) fn encode(client: &InnerClient, query: &str) -> Result<Bytes, Error> {
+pub(crate) fn encode(client: &mut InnerClient, query: &str) -> Result<Bytes, Error> {
    client.with_buf(|buf| {
        frontend::query(query, buf).map_err(Error::encode)?;
        Ok(buf.split().freeze())
    })
}
-
-pin_project! {
-    /// A stream of simple query results.
-    pub struct SimpleQueryStream {
-        responses: Responses,
-        columns: Option<Arc<[SimpleColumn]>>,
-        status: ReadyForQueryStatus,
-        #[pin]
-        _p: PhantomPinned,
-    }
-}
-
-impl SimpleQueryStream {
-    /// Returns if the connection is ready for querying, with the status of the connection.
-    ///
-    /// This might be available only after the stream has been exhausted.
-    pub fn ready_status(&self) -> ReadyForQueryStatus {
-        self.status
-    }
-}
-
-impl Stream for SimpleQueryStream {
-    type Item = Result<SimpleQueryMessage, Error>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
-        let this = self.project();
-        loop {
-            match ready!(this.responses.poll_next(cx)?) {
-                Message::CommandComplete(body) => {
-                    let rows = body
-                        .tag()
-                        .map_err(Error::parse)?
-                        .rsplit(' ')
-                        .next()
-                        .unwrap()
-                        .parse()
-                        .unwrap_or(0);
-                    return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows))));
-                }
-                Message::EmptyQueryResponse => {
-                    return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0))));
-                }
-                Message::RowDescription(body) => {
-                    let columns = body
-                        .fields()
-                        .map(|f| Ok(SimpleColumn::new(f.name().to_string())))
-                        .collect::<Vec<_>>()
-                        .map_err(Error::parse)?
-                        .into();
-
-                    *this.columns = Some(columns);
-                }
-                Message::DataRow(body) => {
-                    let row = match &this.columns {
-                        Some(columns) => SimpleQueryRow::new(columns.clone(), body)?,
-                        None => return Poll::Ready(Some(Err(Error::unexpected_message()))),
-                    };
-                    return Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row))));
-                }
-                Message::ReadyForQuery(s) => {
-                    *this.status = s.into();
-                    return Poll::Ready(None);
-                }
-                _ => return Poll::Ready(Some(Err(Error::unexpected_message()))),
-            }
-        }
-    }
-}
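For context on the removed `SimpleQueryStream`: it derived the affected-row count from a `CommandComplete` tag by taking the last whitespace-separated token. A stand-alone sketch of that parsing step follows; the tag strings are illustrative examples, not captured protocol traffic.

```rust
// Sketch of the CommandComplete tag parsing done by the removed stream.
fn rows_from_tag(tag: &str) -> u64 {
    tag.rsplit(' ')
        .next()
        .unwrap() // `rsplit` always yields at least one item
        .parse()
        .unwrap_or(0)
}

fn main() {
    assert_eq!(rows_from_tag("INSERT 0 42"), 42);
    assert_eq!(rows_from_tag("UPDATE 7"), 7);
    // Tags without a trailing count (e.g. "CREATE TABLE") fall back to 0.
    assert_eq!(rows_from_tag("CREATE TABLE"), 0);
}
```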
@@ -1,64 +1,33 @@
-use crate::client::InnerClient;
-use crate::codec::FrontendMessage;
-use crate::connection::RequestMessages;
use crate::types::Type;
-use postgres_protocol2::{
-    message::{backend::Field, frontend},
-    Oid,
-};
-use std::{
-    fmt,
-    sync::{Arc, Weak},
-};
+use postgres_protocol2::{message::backend::Field, Oid};
+use std::fmt;

struct StatementInner {
-    client: Weak<InnerClient>,
    name: String,
    params: Vec<Type>,
    columns: Vec<Column>,
}

-impl Drop for StatementInner {
-    fn drop(&mut self) {
-        if let Some(client) = self.client.upgrade() {
-            let buf = client.with_buf(|buf| {
-                frontend::close(b'S', &self.name, buf).unwrap();
-                frontend::sync(buf);
-                buf.split().freeze()
-            });
-            let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)));
-        }
-    }
-}
-
/// A prepared statement.
///
/// Prepared statements can only be used with the connection that created them.
-#[derive(Clone)]
-pub struct Statement(Arc<StatementInner>);
+pub struct Statement(StatementInner);

impl Statement {
-    pub(crate) fn new(
-        inner: &Arc<InnerClient>,
-        name: String,
-        params: Vec<Type>,
-        columns: Vec<Column>,
-    ) -> Statement {
-        Statement(Arc::new(StatementInner {
-            client: Arc::downgrade(inner),
+    pub(crate) fn new(name: String, params: Vec<Type>, columns: Vec<Column>) -> Statement {
+        Statement(StatementInner {
            name,
            params,
            columns,
-        }))
+        })
    }

    pub(crate) fn new_anonymous(params: Vec<Type>, columns: Vec<Column>) -> Statement {
-        Statement(Arc::new(StatementInner {
-            client: Weak::new(),
+        Statement(StatementInner {
            name: String::new(),
            params,
            columns,
-        }))
+        })
    }

    pub(crate) fn name(&self) -> &str {
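The removed `Drop for StatementInner` used a `Weak` handle to the client so that dropping the last `Statement` clone could ask the server to close the prepared statement without keeping the connection alive. A toy, self-contained sketch of that Weak-upgrade-in-Drop pattern follows; `Client` here is a stand-in, not the crate's `InnerClient`.

```rust
// Sketch of the cleanup-through-a-Weak-handle pattern removed above.
use std::rc::{Rc, Weak};

struct Client;

impl Client {
    fn close_statement(&self, name: &str) {
        println!("CLOSE {name}");
    }
}

struct Statement {
    client: Weak<Client>,
    name: String,
}

impl Drop for Statement {
    fn drop(&mut self) {
        // Only send the close message if the client still exists; a dangling
        // `Weak` (connection already gone) is simply ignored.
        if let Some(client) = self.client.upgrade() {
            client.close_statement(&self.name);
        }
    }
}

fn main() {
    let client = Rc::new(Client);
    let stmt = Statement {
        client: Rc::downgrade(&client),
        name: "s0".to_owned(),
    };
    drop(stmt); // prints "CLOSE s0"
}
```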
@@ -1,57 +0,0 @@
-use crate::to_statement::private::{Sealed, ToStatementType};
-use crate::Statement;
-
-mod private {
-    use crate::{Client, Error, Statement};
-
-    pub trait Sealed {}
-
-    pub enum ToStatementType<'a> {
-        Statement(&'a Statement),
-        Query(&'a str),
-    }
-
-    impl ToStatementType<'_> {
-        pub async fn into_statement(self, client: &Client) -> Result<Statement, Error> {
-            match self {
-                ToStatementType::Statement(s) => Ok(s.clone()),
-                ToStatementType::Query(s) => client.prepare(s).await,
-            }
-        }
-    }
-}
-
-/// A trait abstracting over prepared and unprepared statements.
-///
-/// Many methods are generic over this bound, so that they support both a raw query string as well as a statement which
-/// was prepared previously.
-///
-/// This trait is "sealed" and cannot be implemented by anything outside this crate.
-pub trait ToStatement: Sealed {
-    #[doc(hidden)]
-    fn __convert(&self) -> ToStatementType<'_>;
-}
-
-impl ToStatement for Statement {
-    fn __convert(&self) -> ToStatementType<'_> {
-        ToStatementType::Statement(self)
-    }
-}
-
-impl Sealed for Statement {}
-
-impl ToStatement for str {
-    fn __convert(&self) -> ToStatementType<'_> {
-        ToStatementType::Query(self)
-    }
-}
-
-impl Sealed for str {}
-
-impl ToStatement for String {
-    fn __convert(&self) -> ToStatementType<'_> {
-        ToStatementType::Query(self)
-    }
-}
-
-impl Sealed for String {}
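The deleted file above is a classic sealed-trait module: the public trait requires a private supertrait, so downstream crates cannot implement it. A compact, self-contained illustration of that pattern follows; the names (`ToQuery`, `sealed`) are generic stand-ins rather than the removed API.

```rust
// Sketch of the sealed-trait pattern used by the deleted `ToStatement` module.
mod sealed {
    pub trait Sealed {}
}

pub trait ToQuery: sealed::Sealed {
    fn as_query(&self) -> &str;
}

impl sealed::Sealed for str {}
impl ToQuery for str {
    fn as_query(&self) -> &str {
        self
    }
}

impl sealed::Sealed for String {}
impl ToQuery for String {
    fn as_query(&self) -> &str {
        self
    }
}

fn main() {
    fn run<T: ToQuery + ?Sized>(q: &T) {
        println!("executing: {}", q.as_query());
    }
    run("SELECT 1");
    run(&String::from("SELECT 2"));
}
```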
@@ -1,6 +1,5 @@
use crate::codec::FrontendMessage;
use crate::connection::RequestMessages;
-use crate::query::RowStream;
use crate::{CancelToken, Client, Error, ReadyForQueryStatus};
use postgres_protocol2::message::frontend;

@@ -19,13 +18,13 @@ impl Drop for Transaction<'_> {
            return;
        }

-        let buf = self.client.inner().with_buf(|buf| {
+        let buf = self.client.inner.with_buf(|buf| {
            frontend::query("ROLLBACK", buf).unwrap();
            buf.split().freeze()
        });
        let _ = self
            .client
-            .inner()
+            .inner
            .send(RequestMessages::Single(FrontendMessage::Raw(buf)));
    }
}
@@ -52,23 +51,13 @@ impl<'a> Transaction<'a> {
        self.client.batch_execute("ROLLBACK").await
    }

-    /// Like `Client::query_raw_txt`.
-    pub async fn query_raw_txt<S, I>(&self, statement: &str, params: I) -> Result<RowStream, Error>
-    where
-        S: AsRef<str>,
-        I: IntoIterator<Item = Option<S>>,
-        I::IntoIter: ExactSizeIterator,
-    {
-        self.client.query_raw_txt(statement, params).await
-    }
-
    /// Like `Client::cancel_token`.
    pub fn cancel_token(&self) -> CancelToken {
        self.client.cancel_token()
    }

    /// Returns a reference to the underlying `Client`.
-    pub fn client(&self) -> &Client {
+    pub fn client(&mut self) -> &mut Client {
        self.client
    }
}
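The `Drop for Transaction` hunk above keeps the rollback-on-drop behaviour and only changes how the inner client is reached. A toy sketch of that guard pattern, with a hypothetical `Conn` type standing in for the real connection, is shown below.

```rust
// Sketch of rollback-on-drop: dropping the guard without a commit issues ROLLBACK.
struct Conn;

impl Conn {
    fn execute(&mut self, sql: &str) {
        println!("> {sql}");
    }
}

struct Transaction<'a> {
    conn: &'a mut Conn,
    done: bool,
}

impl<'a> Transaction<'a> {
    fn begin(conn: &'a mut Conn) -> Self {
        conn.execute("BEGIN");
        Transaction { conn, done: false }
    }

    fn commit(mut self) {
        self.conn.execute("COMMIT");
        self.done = true;
    }
}

impl Drop for Transaction<'_> {
    fn drop(&mut self) {
        if !self.done {
            self.conn.execute("ROLLBACK");
        }
    }
}

fn main() {
    let mut conn = Conn;
    {
        let _tx = Transaction::begin(&mut conn); // dropped without commit
    } // prints "> ROLLBACK"
    Transaction::begin(&mut conn).commit(); // commits, no rollback
}
```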
@@ -18,7 +18,6 @@ camino = { workspace = true, features = ["serde1"] }
humantime-serde.workspace = true
hyper = { workspace = true, features = ["client"] }
futures.workspace = true
-reqwest.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio = { workspace = true, features = ["sync", "fs", "io-util"] }
@@ -13,12 +13,10 @@ use std::time::Duration;
use std::time::SystemTime;

use super::REMOTE_STORAGE_PREFIX_SEPARATOR;
-use anyhow::Context;
use anyhow::Result;
use azure_core::request_options::{IfMatchCondition, MaxResults, Metadata, Range};
-use azure_core::HttpClient;
-use azure_core::TransportOptions;
use azure_core::{Continuable, RetryOptions};
+use azure_identity::DefaultAzureCredential;
use azure_storage::StorageCredentials;
use azure_storage_blobs::blob::CopyStatus;
use azure_storage_blobs::prelude::ClientBuilder;
@@ -78,18 +76,12 @@ impl AzureBlobStorage {
        let credentials = if let Ok(access_key) = env::var("AZURE_STORAGE_ACCESS_KEY") {
            StorageCredentials::access_key(account.clone(), access_key)
        } else {
-            let token_credential = azure_identity::create_default_credential()
-                .context("trying to obtain Azure default credentials")?;
-            StorageCredentials::token_credential(token_credential)
+            let token_credential = DefaultAzureCredential::default();
+            StorageCredentials::token_credential(Arc::new(token_credential))
        };

-        let builder = ClientBuilder::new(account, credentials)
-            // we have an outer retry
-            .retry(RetryOptions::none())
-            // Customize transport to configure conneciton pooling
-            .transport(TransportOptions::new(Self::reqwest_client(
-                azure_config.conn_pool_size,
-            )));
+        // we have an outer retry
+        let builder = ClientBuilder::new(account, credentials).retry(RetryOptions::none());

        let client = builder.container_client(azure_config.container_name.to_owned());

@@ -114,14 +106,6 @@ impl AzureBlobStorage {
        })
    }

-    fn reqwest_client(conn_pool_size: usize) -> Arc<dyn HttpClient> {
-        let client = reqwest::ClientBuilder::new()
-            .pool_max_idle_per_host(conn_pool_size)
-            .build()
-            .expect("failed to build `reqwest` client");
-        Arc::new(client)
-    }
-
    pub fn relative_path_to_name(&self, path: &RemotePath) -> String {
        assert_eq!(std::path::MAIN_SEPARATOR, REMOTE_STORAGE_PREFIX_SEPARATOR);
        let path_string = path.get_path().as_str();
@@ -560,9 +544,9 @@ impl RemoteStorage for AzureBlobStorage {
        .await
    }

-    async fn delete_objects(
+    async fn delete_objects<'a>(
        &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        let kind = RequestKind::Delete;
@@ -640,10 +624,6 @@ impl RemoteStorage for AzureBlobStorage {
        res
    }

-    fn max_keys_per_delete(&self) -> usize {
-        super::MAX_KEYS_PER_DELETE_AZURE
-    }
-
    async fn copy(
        &self,
        from: &RemotePath,
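The removed `reqwest_client` helper built a connection-pooled HTTP client and handed it to the Azure SDK as a custom transport. A minimal sketch of the pooling knob it configured is shown below; the pool size of 8 is an arbitrary example value, and the hand-off to the SDK's `TransportOptions` is omitted.

```rust
// Sketch of the connection-pool configuration done by the removed helper.
fn build_pooled_client(conn_pool_size: usize) -> reqwest::Client {
    reqwest::ClientBuilder::new()
        // 0 disables pooling; larger values keep idle connections around and
        // help avoid exhausting client ports under heavy request volume.
        .pool_max_idle_per_host(conn_pool_size)
        .build()
        .expect("failed to build reqwest client")
}

fn main() {
    let _client = build_pooled_client(8);
}
```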
@@ -114,16 +114,6 @@ fn default_max_keys_per_list_response() -> Option<i32> {
    DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
}

-fn default_azure_conn_pool_size() -> usize {
-    // Conservative default: no connection pooling. At time of writing this is the Azure
-    // SDK's default as well, due to historic reports of hard-to-reproduce issues
-    // (https://github.com/hyperium/hyper/issues/2312)
-    //
-    // However, using connection pooling is important to avoid exhausting client ports when
-    // doing huge numbers of requests (https://github.com/neondatabase/cloud/issues/20971)
-    0
-}
-
impl Debug for S3Config {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("S3Config")
@@ -156,8 +146,6 @@ pub struct AzureConfig {
    pub concurrency_limit: NonZeroUsize,
    #[serde(default = "default_max_keys_per_list_response")]
    pub max_keys_per_list_response: Option<i32>,
-    #[serde(default = "default_azure_conn_pool_size")]
-    pub conn_pool_size: usize,
}

fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize {
@@ -314,7 +302,6 @@ timeout = '5s'";
container_region = 'westeurope'
upload_storage_class = 'INTELLIGENT_TIERING'
timeout = '7s'
-conn_pool_size = 8
";

        let config = parse(toml).unwrap();
@@ -329,7 +316,6 @@ timeout = '5s'";
                prefix_in_container: None,
                concurrency_limit: default_remote_storage_azure_concurrency_limit(),
                max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE,
-                conn_pool_size: 8,
            }),
            timeout: Duration::from_secs(7),
            small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT
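The removed `conn_pool_size` field relied on serde's default-function mechanism: a missing key in the TOML config falls back to the function named in `#[serde(default = "...")]` instead of failing deserialization. A cut-down, self-contained sketch of that pattern is below; `AzureConfigSketch` is a hypothetical stand-in for the crate's `AzureConfig`.

```rust
// Sketch of the `#[serde(default = "...")]` pattern used by the removed field.
use serde::Deserialize;

fn default_azure_conn_pool_size() -> usize {
    0 // conservative default: no connection pooling
}

#[derive(Debug, Deserialize)]
struct AzureConfigSketch {
    container_name: String,
    #[serde(default = "default_azure_conn_pool_size")]
    conn_pool_size: usize,
}

fn main() {
    let with_field: AzureConfigSketch =
        toml::from_str("container_name = 'c'\nconn_pool_size = 8").unwrap();
    let without_field: AzureConfigSketch = toml::from_str("container_name = 'c'").unwrap();
    assert_eq!(with_field.conn_pool_size, 8);
    assert_eq!(without_field.conn_pool_size, 0);
}
```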
@@ -70,14 +70,7 @@ pub const DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT: usize = 100;
pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;

/// As defined in S3 docs
-///
-/// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html>
-pub const MAX_KEYS_PER_DELETE_S3: usize = 1000;
-
-/// As defined in Azure docs
-///
-/// <https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch>
-pub const MAX_KEYS_PER_DELETE_AZURE: usize = 256;
+pub const MAX_KEYS_PER_DELETE: usize = 1000;

const REMOTE_STORAGE_PREFIX_SEPARATOR: char = '/';

@@ -341,20 +334,12 @@ pub trait RemoteStorage: Send + Sync + 'static {
    /// If the operation fails because of timeout or cancellation, the root cause of the error will be
    /// set to `TimeoutOrCancel`. In such situation it is unknown which deletions, if any, went
    /// through.
-    async fn delete_objects(
+    async fn delete_objects<'a>(
        &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
        cancel: &CancellationToken,
    ) -> anyhow::Result<()>;

-    /// Returns the maximum number of keys that a call to [`Self::delete_objects`] can delete without chunking
-    ///
-    /// The value returned is only an optimization hint, One can pass larger number of objects to
-    /// `delete_objects` as well.
-    ///
-    /// The value is guaranteed to be >= 1.
-    fn max_keys_per_delete(&self) -> usize;
-
    /// Deletes all objects matching the given prefix.
    ///
    /// NB: this uses NoDelimiter and will match partial prefixes. For example, the prefix /a/b will
@@ -548,16 +533,6 @@ impl<Other: RemoteStorage> GenericRemoteStorage<Arc<Other>> {
        }
    }

-    /// [`RemoteStorage::max_keys_per_delete`]
-    pub fn max_keys_per_delete(&self) -> usize {
-        match self {
-            Self::LocalFs(s) => s.max_keys_per_delete(),
-            Self::AwsS3(s) => s.max_keys_per_delete(),
-            Self::AzureBlob(s) => s.max_keys_per_delete(),
-            Self::Unreliable(s) => s.max_keys_per_delete(),
-        }
-    }
-
    /// See [`RemoteStorage::delete_prefix`]
    pub async fn delete_prefix(
        &self,
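The removed `max_keys_per_delete` was documented as an optimization hint: callers could chunk a large delete batch to the per-backend limit (1000 for S3, 256 for Azure Blob batch deletes) before calling `delete_objects`. A self-contained sketch of that chunking pattern is shown below; the `BulkDelete` trait is a toy stand-in for the crate's `RemoteStorage`.

```rust
// Sketch of chunking deletes by a per-backend hint, as the removed API allowed.
const MAX_KEYS_PER_DELETE_S3: usize = 1000;
const MAX_KEYS_PER_DELETE_AZURE: usize = 256;

trait BulkDelete {
    fn max_keys_per_delete(&self) -> usize;
    fn delete_objects(&self, paths: &[String]);
}

struct S3;
impl BulkDelete for S3 {
    fn max_keys_per_delete(&self) -> usize {
        MAX_KEYS_PER_DELETE_S3
    }
    fn delete_objects(&self, paths: &[String]) {
        println!("deleting {} objects", paths.len());
    }
}

fn delete_in_chunks(storage: &dyn BulkDelete, paths: &[String]) {
    // Never exceed the backend's per-request limit.
    for chunk in paths.chunks(storage.max_keys_per_delete()) {
        storage.delete_objects(chunk);
    }
}

fn main() {
    let paths: Vec<String> = (0..2500).map(|i| format!("key-{i}")).collect();
    delete_in_chunks(&S3, &paths); // three calls: 1000 + 1000 + 500
    let _ = MAX_KEYS_PER_DELETE_AZURE; // an Azure backend would chunk by 256 instead
}
```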
@@ -562,9 +562,9 @@ impl RemoteStorage for LocalFs {
        }
    }

-    async fn delete_objects(
+    async fn delete_objects<'a>(
        &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        for path in paths {
@@ -573,10 +573,6 @@ impl RemoteStorage for LocalFs {
        Ok(())
    }

-    fn max_keys_per_delete(&self) -> usize {
-        super::MAX_KEYS_PER_DELETE_S3
-    }
-
    async fn copy(
        &self,
        from: &RemotePath,
@@ -48,7 +48,7 @@ use crate::{
    metrics::{start_counting_cancelled_wait, start_measuring_requests},
    support::PermitCarrying,
    ConcurrencyLimiter, Download, DownloadError, DownloadOpts, Listing, ListingMode, ListingObject,
-    RemotePath, RemoteStorage, TimeTravelError, TimeoutOrCancel, MAX_KEYS_PER_DELETE_S3,
+    RemotePath, RemoteStorage, TimeTravelError, TimeoutOrCancel, MAX_KEYS_PER_DELETE,
    REMOTE_STORAGE_PREFIX_SEPARATOR,
};

@@ -355,7 +355,7 @@ impl S3Bucket {
        let kind = RequestKind::Delete;
        let mut cancel = std::pin::pin!(cancel.cancelled());

-        for chunk in delete_objects.chunks(MAX_KEYS_PER_DELETE_S3) {
+        for chunk in delete_objects.chunks(MAX_KEYS_PER_DELETE) {
            let started_at = start_measuring_requests(kind);

            let req = self
@@ -813,9 +813,9 @@ impl RemoteStorage for S3Bucket {
        .await
    }

-    async fn delete_objects(
+    async fn delete_objects<'a>(
        &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        let kind = RequestKind::Delete;
@@ -832,10 +832,6 @@ impl RemoteStorage for S3Bucket {
        self.delete_oids(&permit, &delete_objects, cancel).await
    }

-    fn max_keys_per_delete(&self) -> usize {
-        MAX_KEYS_PER_DELETE_S3
-    }
-
    async fn delete(&self, path: &RemotePath, cancel: &CancellationToken) -> anyhow::Result<()> {
        let paths = std::array::from_ref(path);
        self.delete_objects(paths, cancel).await
@@ -181,9 +181,9 @@ impl RemoteStorage for UnreliableWrapper {
        self.delete_inner(path, true, cancel).await
    }

-    async fn delete_objects(
+    async fn delete_objects<'a>(
        &self,
-        paths: &[RemotePath],
+        paths: &'a [RemotePath],
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        self.attempt(RemoteOp::DeleteObjects(paths.to_vec()))?;
@@ -203,10 +203,6 @@ impl RemoteStorage for UnreliableWrapper {
        Ok(())
    }

-    fn max_keys_per_delete(&self) -> usize {
-        self.inner.max_keys_per_delete()
-    }
-
    async fn copy(
        &self,
        from: &RemotePath,
@@ -218,7 +218,6 @@ async fn create_azure_client(
            prefix_in_container: Some(format!("test_{millis}_{random:08x}/")),
            concurrency_limit: NonZeroUsize::new(100).unwrap(),
            max_keys_per_list_response,
-            conn_pool_size: 8,
        }),
        timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
        small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT,
@@ -5,9 +5,6 @@ edition.workspace = true
license.workspace = true

[dependencies]
-const_format.workspace = true
serde.workspace = true
-postgres_ffi.workspace = true
-pq_proto.workspace = true
-tokio.workspace = true
+const_format.workspace = true
utils.workspace = true
@@ -1,27 +1,10 @@
#![deny(unsafe_code)]
#![deny(clippy::undocumented_unsafe_blocks)]
use const_format::formatcp;
-use pq_proto::SystemId;
-use serde::{Deserialize, Serialize};

/// Public API types
pub mod models;
-
-/// Consensus logical timestamp. Note: it is a part of sk control file.
-pub type Term = u64;
-pub const INVALID_TERM: Term = 0;
-
-/// Information about Postgres. Safekeeper gets it once and then verifies all
-/// further connections from computes match. Note: it is a part of sk control
-/// file.
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct ServerInfo {
-    /// Postgres server version
-    pub pg_version: u32,
-    pub system_id: SystemId,
-    pub wal_seg_size: u32,
-}

pub const DEFAULT_PG_LISTEN_PORT: u16 = 5454;
pub const DEFAULT_PG_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_PG_LISTEN_PORT}");

@@ -1,23 +1,10 @@
-//! Types used in safekeeper http API. Many of them are also reused internally.
-
-use postgres_ffi::TimestampTz;
use serde::{Deserialize, Serialize};
-use std::net::SocketAddr;
-use tokio::time::Instant;

use utils::{
-    id::{NodeId, TenantId, TenantTimelineId, TimelineId},
+    id::{NodeId, TenantId, TimelineId},
    lsn::Lsn,
-    pageserver_feedback::PageserverFeedback,
};

-use crate::{ServerInfo, Term};
-
-#[derive(Debug, Serialize)]
-pub struct SafekeeperStatus {
-    pub id: NodeId,
-}
-
#[derive(Serialize, Deserialize)]
pub struct TimelineCreateRequest {
    pub tenant_id: TenantId,
@@ -31,161 +18,6 @@ pub struct TimelineCreateRequest {
    pub local_start_lsn: Option<Lsn>,
}
-
-/// Same as TermLsn, but serializes LSN using display serializer
-/// in Postgres format, i.e. 0/FFFFFFFF. Used only for the API response.
-#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
-pub struct TermSwitchApiEntry {
-    pub term: Term,
-    pub lsn: Lsn,
-}
-
-/// Augment AcceptorState with last_log_term for convenience
-#[derive(Debug, Serialize, Deserialize)]
-pub struct AcceptorStateStatus {
-    pub term: Term,
-    pub epoch: Term, // aka last_log_term, old `epoch` name is left for compatibility
-    pub term_history: Vec<TermSwitchApiEntry>,
-}
-
-/// Things safekeeper should know about timeline state on peers.
-/// Used as both model and internally.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct PeerInfo {
-    pub sk_id: NodeId,
-    pub term: Term,
-    /// Term of the last entry.
-    pub last_log_term: Term,
-    /// LSN of the last record.
-    pub flush_lsn: Lsn,
-    pub commit_lsn: Lsn,
-    /// Since which LSN safekeeper has WAL.
-    pub local_start_lsn: Lsn,
-    /// When info was received. Serde annotations are not very useful but make
-    /// the code compile -- we don't rely on this field externally.
-    #[serde(skip)]
-    #[serde(default = "Instant::now")]
-    pub ts: Instant,
-    pub pg_connstr: String,
-    pub http_connstr: String,
-}
-
-pub type FullTransactionId = u64;
-
-/// Hot standby feedback received from replica
-#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
-pub struct HotStandbyFeedback {
-    pub ts: TimestampTz,
-    pub xmin: FullTransactionId,
-    pub catalog_xmin: FullTransactionId,
-}
-
-pub const INVALID_FULL_TRANSACTION_ID: FullTransactionId = 0;
-
-impl HotStandbyFeedback {
-    pub fn empty() -> HotStandbyFeedback {
-        HotStandbyFeedback {
-            ts: 0,
-            xmin: 0,
-            catalog_xmin: 0,
-        }
-    }
-}
-
-/// Standby status update
-#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
-pub struct StandbyReply {
-    pub write_lsn: Lsn, // The location of the last WAL byte + 1 received and written to disk in the standby.
-    pub flush_lsn: Lsn, // The location of the last WAL byte + 1 flushed to disk in the standby.
-    pub apply_lsn: Lsn, // The location of the last WAL byte + 1 applied in the standby.
-    pub reply_ts: TimestampTz, // The client's system clock at the time of transmission, as microseconds since midnight on 2000-01-01.
-    pub reply_requested: bool,
-}
-
-impl StandbyReply {
-    pub fn empty() -> Self {
-        StandbyReply {
-            write_lsn: Lsn::INVALID,
-            flush_lsn: Lsn::INVALID,
-            apply_lsn: Lsn::INVALID,
-            reply_ts: 0,
-            reply_requested: false,
-        }
-    }
-}
-
-#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
-pub struct StandbyFeedback {
-    pub reply: StandbyReply,
-    pub hs_feedback: HotStandbyFeedback,
-}
-
-impl StandbyFeedback {
-    pub fn empty() -> Self {
-        StandbyFeedback {
-            reply: StandbyReply::empty(),
-            hs_feedback: HotStandbyFeedback::empty(),
-        }
-    }
-}
-
-/// Receiver is either pageserver or regular standby, which have different
-/// feedbacks.
-/// Used as both model and internally.
-#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
-pub enum ReplicationFeedback {
-    Pageserver(PageserverFeedback),
-    Standby(StandbyFeedback),
-}
-
-/// Uniquely identifies a WAL service connection. Logged in spans for
-/// observability.
-pub type ConnectionId = u32;
-
-/// Serialize is used only for json'ing in API response. Also used internally.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct WalSenderState {
-    pub ttid: TenantTimelineId,
-    pub addr: SocketAddr,
-    pub conn_id: ConnectionId,
-    // postgres application_name
-    pub appname: Option<String>,
-    pub feedback: ReplicationFeedback,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct WalReceiverState {
-    /// None means it is recovery initiated by us (this safekeeper).
-    pub conn_id: Option<ConnectionId>,
-    pub status: WalReceiverStatus,
-}
-
-/// Walreceiver status. Currently only whether it passed voting stage and
-/// started receiving the stream, but it is easy to add more if needed.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub enum WalReceiverStatus {
-    Voting,
-    Streaming,
-}
-
-/// Info about timeline on safekeeper ready for reporting.
-#[derive(Debug, Serialize, Deserialize)]
-pub struct TimelineStatus {
-    pub tenant_id: TenantId,
-    pub timeline_id: TimelineId,
-    pub acceptor_state: AcceptorStateStatus,
-    pub pg_info: ServerInfo,
-    pub flush_lsn: Lsn,
-    pub timeline_start_lsn: Lsn,
-    pub local_start_lsn: Lsn,
-    pub commit_lsn: Lsn,
-    pub backup_lsn: Lsn,
-    pub peer_horizon_lsn: Lsn,
-    pub remote_consistent_lsn: Lsn,
-    pub peers: Vec<PeerInfo>,
-    pub walsenders: Vec<WalSenderState>,
-    pub walreceivers: Vec<WalReceiverState>,
-}
-
fn lsn_invalid() -> Lsn {
    Lsn::INVALID
}
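One detail worth calling out in the removed `PeerInfo` model: the `ts` field combines `#[serde(skip)]` with `#[serde(default = "Instant::now")]` so that a non-serializable `Instant` can live in an otherwise serde-derived struct. A trimmed-down, self-contained sketch of that combination follows; `PeerInfoSketch` is a hypothetical stand-in, not the removed type.

```rust
// Sketch of skipping a non-serializable field and reconstructing it via a
// default function, as the removed `PeerInfo::ts` did.
use serde::{Deserialize, Serialize};
use std::time::Instant;

#[derive(Debug, Serialize, Deserialize)]
struct PeerInfoSketch {
    sk_id: u64,
    #[serde(skip)]
    #[serde(default = "Instant::now")]
    ts: Instant,
}

fn main() {
    let json = r#"{"sk_id": 7}"#;
    let peer: PeerInfoSketch = serde_json::from_str(json).unwrap();
    println!("deserialized peer {} at {:?}", peer.sk_id, peer.ts);
    // Serializing drops `ts` again.
    println!("{}", serde_json::to_string(&peer).unwrap());
}
```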
Some files were not shown because too many files have changed in this diff.