mirror of
https://github.com/neondatabase/neon.git
synced 2026-02-12 07:00:36 +00:00
Compare commits
4 Commits
release-74
...
conrad/pro
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
01475c9e75 | ||
|
|
c835bbba1f | ||
|
|
f94dde4432 | ||
|
|
4991a85704 |
@@ -3,16 +3,6 @@
|
|||||||
# by the RUSTDOCFLAGS env var in CI.
|
# by the RUSTDOCFLAGS env var in CI.
|
||||||
rustdocflags = ["-Arustdoc::private_intra_doc_links"]
|
rustdocflags = ["-Arustdoc::private_intra_doc_links"]
|
||||||
|
|
||||||
# Enable frame pointers. This may have a minor performance overhead, but makes it easier and more
|
|
||||||
# efficient to obtain stack traces (and thus CPU/heap profiles). It may also avoid seg faults that
|
|
||||||
# we've seen with libunwind-based profiling. See also:
|
|
||||||
#
|
|
||||||
# * <https://www.brendangregg.com/blog/2024-03-17/the-return-of-the-frame-pointers.html>
|
|
||||||
# * <https://github.com/rust-lang/rust/pull/122646>
|
|
||||||
#
|
|
||||||
# NB: the RUSTFLAGS envvar will replace this. Make sure to update e.g. Dockerfile as well.
|
|
||||||
rustflags = ["-Cforce-frame-pointers=yes"]
|
|
||||||
|
|
||||||
[alias]
|
[alias]
|
||||||
build_testing = ["build", "--features", "testing"]
|
build_testing = ["build", "--features", "testing"]
|
||||||
neon = ["run", "--bin", "neon_local"]
|
neon = ["run", "--bin", "neon_local"]
|
||||||
|
|||||||
4
.github/actionlint.yml
vendored
4
.github/actionlint.yml
vendored
@@ -21,7 +21,3 @@ config-variables:
|
|||||||
- SLACK_UPCOMING_RELEASE_CHANNEL_ID
|
- SLACK_UPCOMING_RELEASE_CHANNEL_ID
|
||||||
- DEV_AWS_OIDC_ROLE_ARN
|
- DEV_AWS_OIDC_ROLE_ARN
|
||||||
- BENCHMARK_INGEST_TARGET_PROJECTID
|
- BENCHMARK_INGEST_TARGET_PROJECTID
|
||||||
- PGREGRESS_PG16_PROJECT_ID
|
|
||||||
- PGREGRESS_PG17_PROJECT_ID
|
|
||||||
- SLACK_ON_CALL_QA_STAGING_STREAM
|
|
||||||
- DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN
|
|
||||||
|
|||||||
@@ -7,9 +7,10 @@ inputs:
|
|||||||
type: boolean
|
type: boolean
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
aws-oicd-role-arn:
|
aws_oicd_role_arn:
|
||||||
description: 'OIDC role arn to interract with S3'
|
description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
|
||||||
required: true
|
required: false
|
||||||
|
default: ''
|
||||||
|
|
||||||
outputs:
|
outputs:
|
||||||
base-url:
|
base-url:
|
||||||
@@ -83,11 +84,12 @@ runs:
|
|||||||
ALLURE_VERSION: 2.27.0
|
ALLURE_VERSION: 2.27.0
|
||||||
ALLURE_ZIP_SHA256: b071858fb2fa542c65d8f152c5c40d26267b2dfb74df1f1608a589ecca38e777
|
ALLURE_ZIP_SHA256: b071858fb2fa542c65d8f152c5c40d26267b2dfb74df1f1608a589ecca38e777
|
||||||
|
|
||||||
- uses: aws-actions/configure-aws-credentials@v4
|
- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
|
||||||
|
uses: aws-actions/configure-aws-credentials@v4
|
||||||
with:
|
with:
|
||||||
aws-region: eu-central-1
|
aws-region: eu-central-1
|
||||||
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
|
role-to-assume: ${{ inputs.aws_oicd_role_arn }}
|
||||||
role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
|
role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
|
||||||
|
|
||||||
# Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this
|
# Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this
|
||||||
|
|||||||
14
.github/actions/allure-report-store/action.yml
vendored
14
.github/actions/allure-report-store/action.yml
vendored
@@ -8,9 +8,10 @@ inputs:
|
|||||||
unique-key:
|
unique-key:
|
||||||
description: 'string to distinguish different results in the same run'
|
description: 'string to distinguish different results in the same run'
|
||||||
required: true
|
required: true
|
||||||
aws-oicd-role-arn:
|
aws_oicd_role_arn:
|
||||||
description: 'OIDC role arn to interract with S3'
|
description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
|
||||||
required: true
|
required: false
|
||||||
|
default: ''
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
@@ -35,11 +36,12 @@ runs:
|
|||||||
env:
|
env:
|
||||||
REPORT_DIR: ${{ inputs.report-dir }}
|
REPORT_DIR: ${{ inputs.report-dir }}
|
||||||
|
|
||||||
- uses: aws-actions/configure-aws-credentials@v4
|
- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
|
||||||
|
uses: aws-actions/configure-aws-credentials@v4
|
||||||
with:
|
with:
|
||||||
aws-region: eu-central-1
|
aws-region: eu-central-1
|
||||||
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
|
role-to-assume: ${{ inputs.aws_oicd_role_arn }}
|
||||||
role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
|
role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
|
||||||
|
|
||||||
- name: Upload test results
|
- name: Upload test results
|
||||||
|
|||||||
9
.github/actions/download/action.yml
vendored
9
.github/actions/download/action.yml
vendored
@@ -15,19 +15,10 @@ inputs:
|
|||||||
prefix:
|
prefix:
|
||||||
description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
|
description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
|
||||||
required: false
|
required: false
|
||||||
aws-oicd-role-arn:
|
|
||||||
description: 'OIDC role arn to interract with S3'
|
|
||||||
required: true
|
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
steps:
|
steps:
|
||||||
- uses: aws-actions/configure-aws-credentials@v4
|
|
||||||
with:
|
|
||||||
aws-region: eu-central-1
|
|
||||||
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
|
|
||||||
role-duration-seconds: 3600
|
|
||||||
|
|
||||||
- name: Download artifact
|
- name: Download artifact
|
||||||
id: download-artifact
|
id: download-artifact
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
|
|||||||
20
.github/actions/run-python-test-set/action.yml
vendored
20
.github/actions/run-python-test-set/action.yml
vendored
@@ -48,9 +48,10 @@ inputs:
|
|||||||
description: 'benchmark durations JSON'
|
description: 'benchmark durations JSON'
|
||||||
required: false
|
required: false
|
||||||
default: '{}'
|
default: '{}'
|
||||||
aws-oicd-role-arn:
|
aws_oicd_role_arn:
|
||||||
description: 'OIDC role arn to interract with S3'
|
description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
|
||||||
required: true
|
required: false
|
||||||
|
default: ''
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
@@ -61,7 +62,6 @@ runs:
|
|||||||
with:
|
with:
|
||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
|
||||||
path: /tmp/neon
|
path: /tmp/neon
|
||||||
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
|
|
||||||
|
|
||||||
- name: Download Neon binaries for the previous release
|
- name: Download Neon binaries for the previous release
|
||||||
if: inputs.build_type != 'remote'
|
if: inputs.build_type != 'remote'
|
||||||
@@ -70,7 +70,6 @@ runs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
|
||||||
path: /tmp/neon-previous
|
path: /tmp/neon-previous
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
|
|
||||||
|
|
||||||
- name: Download compatibility snapshot
|
- name: Download compatibility snapshot
|
||||||
if: inputs.build_type != 'remote'
|
if: inputs.build_type != 'remote'
|
||||||
@@ -82,7 +81,6 @@ runs:
|
|||||||
# The lack of compatibility snapshot (for example, for the new Postgres version)
|
# The lack of compatibility snapshot (for example, for the new Postgres version)
|
||||||
# shouldn't fail the whole job. Only relevant test should fail.
|
# shouldn't fail the whole job. Only relevant test should fail.
|
||||||
skip-if-does-not-exist: true
|
skip-if-does-not-exist: true
|
||||||
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
|
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
if: inputs.needs_postgres_source == 'true'
|
if: inputs.needs_postgres_source == 'true'
|
||||||
@@ -220,19 +218,17 @@ runs:
|
|||||||
# The lack of compatibility snapshot shouldn't fail the job
|
# The lack of compatibility snapshot shouldn't fail the job
|
||||||
# (for example if we didn't run the test for non build-and-test workflow)
|
# (for example if we didn't run the test for non build-and-test workflow)
|
||||||
skip-if-does-not-exist: true
|
skip-if-does-not-exist: true
|
||||||
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
|
|
||||||
|
|
||||||
- uses: aws-actions/configure-aws-credentials@v4
|
- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
|
||||||
|
uses: aws-actions/configure-aws-credentials@v4
|
||||||
with:
|
with:
|
||||||
aws-region: eu-central-1
|
aws-region: eu-central-1
|
||||||
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
|
role-to-assume: ${{ inputs.aws_oicd_role_arn }}
|
||||||
role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
|
role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
|
||||||
|
|
||||||
- name: Upload test results
|
- name: Upload test results
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-store
|
uses: ./.github/actions/allure-report-store
|
||||||
with:
|
with:
|
||||||
report-dir: /tmp/test_output/allure/results
|
report-dir: /tmp/test_output/allure/results
|
||||||
unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}
|
unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}
|
||||||
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
|
|
||||||
|
|||||||
@@ -14,11 +14,9 @@ runs:
|
|||||||
name: coverage-data-artifact
|
name: coverage-data-artifact
|
||||||
path: /tmp/coverage
|
path: /tmp/coverage
|
||||||
skip-if-does-not-exist: true # skip if there's no previous coverage to download
|
skip-if-does-not-exist: true # skip if there's no previous coverage to download
|
||||||
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
|
|
||||||
|
|
||||||
- name: Upload coverage data
|
- name: Upload coverage data
|
||||||
uses: ./.github/actions/upload
|
uses: ./.github/actions/upload
|
||||||
with:
|
with:
|
||||||
name: coverage-data-artifact
|
name: coverage-data-artifact
|
||||||
path: /tmp/coverage
|
path: /tmp/coverage
|
||||||
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
|
|
||||||
|
|||||||
11
.github/actions/upload/action.yml
vendored
11
.github/actions/upload/action.yml
vendored
@@ -14,10 +14,6 @@ inputs:
|
|||||||
prefix:
|
prefix:
|
||||||
description: "S3 prefix. Default is '${GITHUB_SHA}/${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
|
description: "S3 prefix. Default is '${GITHUB_SHA}/${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
|
||||||
required: false
|
required: false
|
||||||
aws-oicd-role-arn:
|
|
||||||
description: "the OIDC role arn for aws auth"
|
|
||||||
required: false
|
|
||||||
default: ""
|
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
@@ -57,13 +53,6 @@ runs:
|
|||||||
|
|
||||||
echo 'SKIPPED=false' >> $GITHUB_OUTPUT
|
echo 'SKIPPED=false' >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Configure AWS credentials
|
|
||||||
uses: aws-actions/configure-aws-credentials@v4
|
|
||||||
with:
|
|
||||||
aws-region: eu-central-1
|
|
||||||
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
|
|
||||||
role-duration-seconds: 3600
|
|
||||||
|
|
||||||
- name: Upload artifact
|
- name: Upload artifact
|
||||||
if: ${{ steps.prepare-artifact.outputs.SKIPPED == 'false' }}
|
if: ${{ steps.prepare-artifact.outputs.SKIPPED == 'false' }}
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
|
|||||||
12
.github/file-filters.yaml
vendored
12
.github/file-filters.yaml
vendored
@@ -1,12 +0,0 @@
|
|||||||
rust_code: ['**/*.rs', '**/Cargo.toml', '**/Cargo.lock']
|
|
||||||
|
|
||||||
v14: ['vendor/postgres-v14/**', 'Makefile', 'pgxn/**']
|
|
||||||
v15: ['vendor/postgres-v15/**', 'Makefile', 'pgxn/**']
|
|
||||||
v16: ['vendor/postgres-v16/**', 'Makefile', 'pgxn/**']
|
|
||||||
v17: ['vendor/postgres-v17/**', 'Makefile', 'pgxn/**']
|
|
||||||
|
|
||||||
rebuild_neon_extra:
|
|
||||||
- .github/workflows/neon_extra_builds.yml
|
|
||||||
|
|
||||||
rebuild_macos:
|
|
||||||
- .github/workflows/build-macos.yml
|
|
||||||
@@ -70,7 +70,6 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
# we create a table that has one row for each database that we want to restore with the status whether the restore is done
|
# we create a table that has one row for each database that we want to restore with the status whether the restore is done
|
||||||
- name: Create benchmark_restore_status table if it does not exist
|
- name: Create benchmark_restore_status table if it does not exist
|
||||||
|
|||||||
20
.github/workflows/_build-and-test-locally.yml
vendored
20
.github/workflows/_build-and-test-locally.yml
vendored
@@ -31,13 +31,12 @@ defaults:
|
|||||||
env:
|
env:
|
||||||
RUST_BACKTRACE: 1
|
RUST_BACKTRACE: 1
|
||||||
COPT: '-Werror'
|
COPT: '-Werror'
|
||||||
|
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
|
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-neon:
|
build-neon:
|
||||||
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
|
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
|
||||||
permissions:
|
|
||||||
id-token: write # aws-actions/configure-aws-credentials
|
|
||||||
contents: read
|
|
||||||
container:
|
container:
|
||||||
image: ${{ inputs.build-tools-image }}
|
image: ${{ inputs.build-tools-image }}
|
||||||
credentials:
|
credentials:
|
||||||
@@ -206,13 +205,6 @@ jobs:
|
|||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Configure AWS credentials
|
|
||||||
uses: aws-actions/configure-aws-credentials@v4
|
|
||||||
with:
|
|
||||||
aws-region: eu-central-1
|
|
||||||
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
role-duration-seconds: 18000 # 5 hours
|
|
||||||
|
|
||||||
- name: Run rust tests
|
- name: Run rust tests
|
||||||
env:
|
env:
|
||||||
NEXTEST_RETRIES: 3
|
NEXTEST_RETRIES: 3
|
||||||
@@ -264,7 +256,6 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-artifact
|
||||||
path: /tmp/neon
|
path: /tmp/neon
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
# XXX: keep this after the binaries.list is formed, so the coverage can properly work later
|
# XXX: keep this after the binaries.list is formed, so the coverage can properly work later
|
||||||
- name: Merge and upload coverage data
|
- name: Merge and upload coverage data
|
||||||
@@ -274,10 +265,6 @@ jobs:
|
|||||||
regress-tests:
|
regress-tests:
|
||||||
# Don't run regression tests on debug arm64 builds
|
# Don't run regression tests on debug arm64 builds
|
||||||
if: inputs.build-type != 'debug' || inputs.arch != 'arm64'
|
if: inputs.build-type != 'debug' || inputs.arch != 'arm64'
|
||||||
permissions:
|
|
||||||
id-token: write # aws-actions/configure-aws-credentials
|
|
||||||
contents: read
|
|
||||||
statuses: write
|
|
||||||
needs: [ build-neon ]
|
needs: [ build-neon ]
|
||||||
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
|
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
|
||||||
container:
|
container:
|
||||||
@@ -296,7 +283,7 @@ jobs:
|
|||||||
submodules: true
|
submodules: true
|
||||||
|
|
||||||
- name: Pytest regression tests
|
- name: Pytest regression tests
|
||||||
continue-on-error: ${{ matrix.lfc_state == 'with-lfc' && inputs.build-type == 'debug' }}
|
continue-on-error: ${{ matrix.lfc_state == 'with-lfc' }}
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
with:
|
with:
|
||||||
@@ -308,7 +295,6 @@ jobs:
|
|||||||
real_s3_region: eu-central-1
|
real_s3_region: eu-central-1
|
||||||
rerun_failed: true
|
rerun_failed: true
|
||||||
pg_version: ${{ matrix.pg_version }}
|
pg_version: ${{ matrix.pg_version }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
env:
|
env:
|
||||||
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
|
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
|
||||||
CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
|
CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
|
||||||
|
|||||||
2
.github/workflows/actionlint.yml
vendored
2
.github/workflows/actionlint.yml
vendored
@@ -33,7 +33,7 @@ jobs:
|
|||||||
# SC2086 - Double quote to prevent globbing and word splitting. - https://www.shellcheck.net/wiki/SC2086
|
# SC2086 - Double quote to prevent globbing and word splitting. - https://www.shellcheck.net/wiki/SC2086
|
||||||
SHELLCHECK_OPTS: --exclude=SC2046,SC2086
|
SHELLCHECK_OPTS: --exclude=SC2046,SC2086
|
||||||
with:
|
with:
|
||||||
fail_level: error
|
fail_on_error: true
|
||||||
filter_mode: nofilter
|
filter_mode: nofilter
|
||||||
level: error
|
level: error
|
||||||
|
|
||||||
|
|||||||
67
.github/workflows/benchmarking.yml
vendored
67
.github/workflows/benchmarking.yml
vendored
@@ -105,7 +105,6 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Create Neon Project
|
- name: Create Neon Project
|
||||||
id: create-neon-project
|
id: create-neon-project
|
||||||
@@ -123,7 +122,7 @@ jobs:
|
|||||||
run_in_parallel: false
|
run_in_parallel: false
|
||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
# Set --sparse-ordering option of pytest-order plugin
|
# Set --sparse-ordering option of pytest-order plugin
|
||||||
# to ensure tests are running in order of appears in the file.
|
# to ensure tests are running in order of appears in the file.
|
||||||
# It's important for test_perf_pgbench.py::test_pgbench_remote_* tests
|
# It's important for test_perf_pgbench.py::test_pgbench_remote_* tests
|
||||||
@@ -153,7 +152,7 @@ jobs:
|
|||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -205,7 +204,6 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Run Logical Replication benchmarks
|
- name: Run Logical Replication benchmarks
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
@@ -216,7 +214,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 5400
|
extra_params: -m remote_cluster --timeout 5400
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
@@ -233,7 +231,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 5400
|
extra_params: -m remote_cluster --timeout 5400
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
@@ -245,7 +243,7 @@ jobs:
|
|||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
store-test-results-into-db: true
|
store-test-results-into-db: true
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
|
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
|
||||||
|
|
||||||
@@ -308,7 +306,6 @@ jobs:
|
|||||||
"image": [ "'"$image_default"'" ],
|
"image": [ "'"$image_default"'" ],
|
||||||
"include": [{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_default"', "image": "'"$image_default"'" },
|
"include": [{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_default"', "image": "'"$image_default"'" },
|
||||||
{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
|
{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
|
||||||
{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new-many-tables","db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
|
|
||||||
{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
|
{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
|
||||||
{ "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
|
{ "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
|
||||||
{ "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-new", "db_size": "10gb","runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
|
{ "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-new", "db_size": "10gb","runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
|
||||||
@@ -408,10 +405,9 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Create Neon Project
|
- name: Create Neon Project
|
||||||
if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-new-many-tables", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
|
if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
|
||||||
id: create-neon-project
|
id: create-neon-project
|
||||||
uses: ./.github/actions/neon-project-create
|
uses: ./.github/actions/neon-project-create
|
||||||
with:
|
with:
|
||||||
@@ -430,7 +426,7 @@ jobs:
|
|||||||
neonvm-captest-sharding-reuse)
|
neonvm-captest-sharding-reuse)
|
||||||
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_SHARDING_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_SHARDING_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
neonvm-captest-new | neonvm-captest-new-many-tables | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
|
neonvm-captest-new | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
|
||||||
CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
|
CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
|
||||||
;;
|
;;
|
||||||
rds-aurora)
|
rds-aurora)
|
||||||
@@ -447,26 +443,6 @@ jobs:
|
|||||||
|
|
||||||
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
# we want to compare Neon project OLTP throughput and latency at scale factor 10 GB
|
|
||||||
# without (neonvm-captest-new)
|
|
||||||
# and with (neonvm-captest-new-many-tables) many relations in the database
|
|
||||||
- name: Create many relations before the run
|
|
||||||
if: contains(fromJson('["neonvm-captest-new-many-tables"]'), matrix.platform)
|
|
||||||
uses: ./.github/actions/run-python-test-set
|
|
||||||
with:
|
|
||||||
build_type: ${{ env.BUILD_TYPE }}
|
|
||||||
test_selection: performance
|
|
||||||
run_in_parallel: false
|
|
||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
|
||||||
extra_params: -m remote_cluster --timeout 21600 -k test_perf_many_relations
|
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
env:
|
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
|
||||||
TEST_NUM_RELATIONS: 10000
|
|
||||||
|
|
||||||
- name: Benchmark init
|
- name: Benchmark init
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
with:
|
with:
|
||||||
@@ -476,7 +452,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
|
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
@@ -491,7 +467,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
|
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
@@ -506,7 +482,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
|
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
@@ -524,7 +500,7 @@ jobs:
|
|||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -635,7 +611,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
|
extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
@@ -650,7 +626,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 21600
|
extra_params: -m remote_cluster --timeout 21600
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
@@ -661,7 +637,7 @@ jobs:
|
|||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -732,7 +708,6 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Set up Connection String
|
- name: Set up Connection String
|
||||||
id: set-up-connstr
|
id: set-up-connstr
|
||||||
@@ -764,7 +739,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 43200 -k test_clickbench
|
extra_params: -m remote_cluster --timeout 43200 -k test_clickbench
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
@@ -778,7 +753,7 @@ jobs:
|
|||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -843,7 +818,6 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Get Connstring Secret Name
|
- name: Get Connstring Secret Name
|
||||||
run: |
|
run: |
|
||||||
@@ -882,7 +856,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 21600 -k test_tpch
|
extra_params: -m remote_cluster --timeout 21600 -k test_tpch
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
@@ -894,7 +868,7 @@ jobs:
|
|||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -952,7 +926,6 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Set up Connection String
|
- name: Set up Connection String
|
||||||
id: set-up-connstr
|
id: set-up-connstr
|
||||||
@@ -984,7 +957,7 @@ jobs:
|
|||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
|
extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
|
||||||
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
@@ -995,7 +968,7 @@ jobs:
|
|||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
|
|||||||
241
.github/workflows/build-macos.yml
vendored
241
.github/workflows/build-macos.yml
vendored
@@ -1,241 +0,0 @@
|
|||||||
name: Check neon with MacOS builds
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
pg_versions:
|
|
||||||
description: "Array of the pg versions to build for, for example: ['v14', 'v17']"
|
|
||||||
type: string
|
|
||||||
default: '[]'
|
|
||||||
required: false
|
|
||||||
rebuild_rust_code:
|
|
||||||
description: "Rebuild Rust code"
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
required: false
|
|
||||||
rebuild_everything:
|
|
||||||
description: "If true, rebuild for all versions"
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
required: false
|
|
||||||
|
|
||||||
env:
|
|
||||||
RUST_BACKTRACE: 1
|
|
||||||
COPT: '-Werror'
|
|
||||||
|
|
||||||
# TODO: move `check-*` and `files-changed` jobs to the "Caller" Workflow
|
|
||||||
# We should care about that as Github has limitations:
|
|
||||||
# - You can connect up to four levels of workflows
|
|
||||||
# - You can call a maximum of 20 unique reusable workflows from a single workflow file.
|
|
||||||
# https://docs.github.com/en/actions/sharing-automations/reusing-workflows#limitations
|
|
||||||
jobs:
|
|
||||||
build-pgxn:
|
|
||||||
if: |
|
|
||||||
(inputs.pg_versions != '[]' || inputs.rebuild_everything) && (
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
|
|
||||||
github.ref_name == 'main'
|
|
||||||
)
|
|
||||||
timeout-minutes: 30
|
|
||||||
runs-on: macos-15
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
postgres-version: ${{ inputs.rebuild_everything && fromJson('["v14", "v15", "v16", "v17"]') || fromJSON(inputs.pg_versions) }}
|
|
||||||
env:
|
|
||||||
# Use release build only, to have less debug info around
|
|
||||||
# Hence keeping target/ (and general cache size) smaller
|
|
||||||
BUILD_TYPE: release
|
|
||||||
steps:
|
|
||||||
- name: Checkout main repo
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Set pg ${{ matrix.postgres-version }} for caching
|
|
||||||
id: pg_rev
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-${{ matrix.postgres-version }}) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
|
|
||||||
- name: Cache postgres ${{ matrix.postgres-version }} build
|
|
||||||
id: cache_pg
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/${{ matrix.postgres-version }}
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ matrix.postgres-version }}-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Checkout submodule vendor/postgres-${{ matrix.postgres-version }}
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
git submodule init vendor/postgres-${{ matrix.postgres-version }}
|
|
||||||
git submodule update --depth 1 --recursive
|
|
||||||
|
|
||||||
- name: Install build dependencies
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
brew install flex bison openssl protobuf icu4c
|
|
||||||
|
|
||||||
- name: Set extra env for macOS
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
|
|
||||||
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Build Postgres ${{ matrix.postgres-version }}
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
make postgres-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
- name: Build Neon Pg Ext ${{ matrix.postgres-version }}
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
make "neon-pg-ext-${{ matrix.postgres-version }}" -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
- name: Get postgres headers ${{ matrix.postgres-version }}
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
make postgres-headers-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
build-walproposer-lib:
|
|
||||||
if: |
|
|
||||||
(inputs.pg_versions != '[]' || inputs.rebuild_everything) && (
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
|
|
||||||
github.ref_name == 'main'
|
|
||||||
)
|
|
||||||
timeout-minutes: 30
|
|
||||||
runs-on: macos-15
|
|
||||||
needs: [build-pgxn]
|
|
||||||
env:
|
|
||||||
# Use release build only, to have less debug info around
|
|
||||||
# Hence keeping target/ (and general cache size) smaller
|
|
||||||
BUILD_TYPE: release
|
|
||||||
steps:
|
|
||||||
- name: Checkout main repo
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Set pg v17 for caching
|
|
||||||
id: pg_rev
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
|
|
||||||
- name: Cache postgres v17 build
|
|
||||||
id: cache_pg
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v17
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Cache walproposer-lib
|
|
||||||
id: cache_walproposer_lib
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/build/walproposer-lib
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Checkout submodule vendor/postgres-v17
|
|
||||||
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
git submodule init vendor/postgres-v17
|
|
||||||
git submodule update --depth 1 --recursive
|
|
||||||
|
|
||||||
- name: Install build dependencies
|
|
||||||
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
brew install flex bison openssl protobuf icu4c
|
|
||||||
|
|
||||||
- name: Set extra env for macOS
|
|
||||||
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
|
|
||||||
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Build walproposer-lib (only for v17)
|
|
||||||
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
|
|
||||||
run:
|
|
||||||
make walproposer-lib -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
cargo-build:
|
|
||||||
if: |
|
|
||||||
(inputs.pg_versions != '[]' || inputs.rebuild_rust_code || inputs.rebuild_everything) && (
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
|
|
||||||
github.ref_name == 'main'
|
|
||||||
)
|
|
||||||
timeout-minutes: 30
|
|
||||||
runs-on: macos-15
|
|
||||||
needs: [build-pgxn, build-walproposer-lib]
|
|
||||||
env:
|
|
||||||
# Use release build only, to have less debug info around
|
|
||||||
# Hence keeping target/ (and general cache size) smaller
|
|
||||||
BUILD_TYPE: release
|
|
||||||
steps:
|
|
||||||
- name: Checkout main repo
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
|
|
||||||
- name: Set pg v14 for caching
|
|
||||||
id: pg_rev_v14
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
- name: Set pg v15 for caching
|
|
||||||
id: pg_rev_v15
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
- name: Set pg v16 for caching
|
|
||||||
id: pg_rev_v16
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v16) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
- name: Set pg v17 for caching
|
|
||||||
id: pg_rev_v17
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
|
|
||||||
- name: Cache postgres v14 build
|
|
||||||
id: cache_pg
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v14
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v14-${{ steps.pg_rev_v14.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
- name: Cache postgres v15 build
|
|
||||||
id: cache_pg_v15
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v15
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v15-${{ steps.pg_rev_v15.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
- name: Cache postgres v16 build
|
|
||||||
id: cache_pg_v16
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v16
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v16-${{ steps.pg_rev_v16.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
- name: Cache postgres v17 build
|
|
||||||
id: cache_pg_v17
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v17
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v17-${{ steps.pg_rev_v17.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Cache cargo deps (only for v17)
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/registry
|
|
||||||
!~/.cargo/registry/src
|
|
||||||
~/.cargo/git
|
|
||||||
target
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
|
|
||||||
|
|
||||||
- name: Cache walproposer-lib
|
|
||||||
id: cache_walproposer_lib
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/build/walproposer-lib
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev_v17.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Install build dependencies
|
|
||||||
run: |
|
|
||||||
brew install flex bison openssl protobuf icu4c
|
|
||||||
|
|
||||||
- name: Set extra env for macOS
|
|
||||||
run: |
|
|
||||||
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
|
|
||||||
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Run cargo build (only for v17)
|
|
||||||
run: PQ_LIB_DIR=$(pwd)/pg_install/v17/lib cargo build --all --release -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
- name: Check that no warnings are produced (only for v17)
|
|
||||||
run: ./run_clippy.sh
|
|
||||||
268
.github/workflows/build_and_test.yml
vendored
268
.github/workflows/build_and_test.yml
vendored
@@ -21,6 +21,8 @@ concurrency:
|
|||||||
env:
|
env:
|
||||||
RUST_BACKTRACE: 1
|
RUST_BACKTRACE: 1
|
||||||
COPT: '-Werror'
|
COPT: '-Werror'
|
||||||
|
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
|
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
# A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
|
# A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
|
||||||
E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
|
E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
|
||||||
|
|
||||||
@@ -212,7 +214,7 @@ jobs:
|
|||||||
fi
|
fi
|
||||||
echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
|
echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
|
||||||
- name: Run cargo clippy (debug)
|
- name: Run cargo clippy (debug)
|
||||||
run: cargo hack --features default --ignore-unknown-features --feature-powerset clippy $CLIPPY_COMMON_ARGS
|
run: cargo hack --feature-powerset clippy $CLIPPY_COMMON_ARGS
|
||||||
|
|
||||||
- name: Check documentation generation
|
- name: Check documentation generation
|
||||||
run: cargo doc --workspace --no-deps --document-private-items
|
run: cargo doc --workspace --no-deps --document-private-items
|
||||||
@@ -253,15 +255,15 @@ jobs:
|
|||||||
build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
||||||
build-tag: ${{ needs.tag.outputs.build-tag }}
|
build-tag: ${{ needs.tag.outputs.build-tag }}
|
||||||
build-type: ${{ matrix.build-type }}
|
build-type: ${{ matrix.build-type }}
|
||||||
# Run tests on all Postgres versions in release builds and only on the latest version in debug builds.
|
# Run tests on all Postgres versions in release builds and only on the latest version in debug builds
|
||||||
# Run without LFC on v17 release and debug builds only. For all the other cases LFC is enabled.
|
# run without LFC on v17 release only
|
||||||
test-cfg: |
|
test-cfg: |
|
||||||
${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "with-lfc"},
|
${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "without-lfc"},
|
||||||
{"pg_version":"v15", "lfc_state": "with-lfc"},
|
{"pg_version":"v15", "lfc_state": "without-lfc"},
|
||||||
{"pg_version":"v16", "lfc_state": "with-lfc"},
|
{"pg_version":"v16", "lfc_state": "without-lfc"},
|
||||||
{"pg_version":"v17", "lfc_state": "with-lfc"},
|
{"pg_version":"v17", "lfc_state": "without-lfc"},
|
||||||
{"pg_version":"v17", "lfc_state": "without-lfc"}]'
|
{"pg_version":"v17", "lfc_state": "with-lfc"}]'
|
||||||
|| '[{"pg_version":"v17", "lfc_state": "without-lfc" }]' }}
|
|| '[{"pg_version":"v17", "lfc_state": "without-lfc"}]' }}
|
||||||
secrets: inherit
|
secrets: inherit
|
||||||
|
|
||||||
# Keep `benchmarks` job outside of `build-and-test-locally` workflow to make job failures non-blocking
|
# Keep `benchmarks` job outside of `build-and-test-locally` workflow to make job failures non-blocking
|
||||||
@@ -303,11 +305,6 @@ jobs:
|
|||||||
benchmarks:
|
benchmarks:
|
||||||
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
|
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
|
||||||
     needs: [ check-permissions, build-and-test-locally, build-build-tools-image, get-benchmarks-durations ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     runs-on: [ self-hosted, small ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
@@ -336,7 +333,6 @@ jobs:
           extra_params: --splits 5 --group ${{ matrix.pytest_split_group }}
           benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
           pg_version: v16
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -349,11 +345,6 @@ jobs:
   report-benchmarks-failures:
     needs: [ benchmarks, create-test-report ]
     if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure'
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     runs-on: ubuntu-22.04

     steps:
@@ -369,11 +360,6 @@ jobs:
   create-test-report:
     needs: [ check-permissions, build-and-test-locally, coverage-report, build-build-tools-image, benchmarks ]
     if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     outputs:
       report-url: ${{ steps.create-allure-report.outputs.report-url }}

@@ -394,7 +380,6 @@ jobs:
         uses: ./.github/actions/allure-report-generate
         with:
           store-test-results-into-db: true
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

@@ -426,10 +411,6 @@ jobs:
   coverage-report:
     if: ${{ !startsWith(github.ref_name, 'release') }}
     needs: [ check-permissions, build-build-tools-image, build-and-test-locally ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
     runs-on: [ self-hosted, small ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
@@ -456,14 +437,12 @@ jobs:
         with:
           name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact
           path: /tmp/neon
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Get coverage artifact
         uses: ./.github/actions/download
         with:
           name: coverage-data-artifact
           path: /tmp/coverage
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Merge coverage data
         run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
@@ -538,7 +517,7 @@ jobs:

   trigger-e2e-tests:
     if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'run-e2e-tests-in-draft') || github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute' }}
-    needs: [ check-permissions, promote-images-dev, tag ]
+    needs: [ check-permissions, promote-images, tag ]
     uses: ./.github/workflows/trigger-e2e-tests.yml
     secrets: inherit

@@ -594,10 +573,6 @@ jobs:
   neon-image:
     needs: [ neon-image-arch, tag ]
     runs-on: ubuntu-22.04
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read

     steps:
       - uses: docker/login-action@v3
@@ -612,15 +587,11 @@ jobs:
             neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-x64 \
             neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-arm64

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
         with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}

-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
-
       - name: Push multi-arch image to ECR
         run: |
@@ -629,10 +600,6 @@ jobs:

   compute-node-image-arch:
     needs: [ check-permissions, build-build-tools-image, tag ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     strategy:
       fail-fast: false
       matrix:
@@ -673,15 +640,11 @@ jobs:
           username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
           password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
         with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}

-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
-
       - uses: docker/login-action@v3
         with:
@@ -754,10 +717,6 @@ jobs:

   compute-node-image:
     needs: [ compute-node-image-arch, tag ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     runs-on: ubuntu-22.04

     strategy:
@@ -802,15 +761,11 @@ jobs:
             neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-x64 \
             neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
         with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}

-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
-
       - name: Push multi-arch compute-node-${{ matrix.version.pg }} image to ECR
         run: |
@@ -840,7 +795,7 @@ jobs:
           - pg: v17
             debian: bookworm
     env:
-      VM_BUILDER_VERSION: v0.37.1
+      VM_BUILDER_VERSION: v0.35.0

     steps:
       - uses: actions/checkout@v4
@@ -930,14 +885,12 @@ jobs:
           docker compose -f ./docker-compose/docker-compose.yml logs || 0
           docker compose -f ./docker-compose/docker-compose.yml down

-  promote-images-dev:
-    needs: [ check-permissions, tag, vm-compute-node-image ]
+  promote-images:
+    needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
     runs-on: ubuntu-22.04

     permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
+      id-token: write # for `aws-actions/configure-aws-credentials`

     env:
       VERSIONS: v14 v15 v16 v17
@@ -948,15 +901,12 @@ jobs:
           username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
           password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - name: Login to dev ECR
+        uses: docker/login-action@v3
         with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}

-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
-
       - name: Copy vm-compute-node images to ECR
         run: |
@@ -965,35 +915,6 @@ jobs:
             neondatabase/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }}
           done

-  promote-images-prod:
-    needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
-    runs-on: ubuntu-22.04
-    if: github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
-
-    env:
-      VERSIONS: v14 v15 v16 v17
-
-    steps:
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-
-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
-
-      - uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
-          password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
-
       - name: Add latest tag to images
         if: github.ref_name == 'main'
         run: |
@@ -1039,7 +960,7 @@ jobs:

   push-to-acr-dev:
     if: github.ref_name == 'main'
-    needs: [ tag, promote-images-dev ]
+    needs: [ tag, promote-images ]
     uses: ./.github/workflows/_push-to-acr.yml
     with:
       client_id: ${{ vars.AZURE_DEV_CLIENT_ID }}
@@ -1051,7 +972,7 @@ jobs:

   push-to-acr-prod:
     if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-    needs: [ tag, promote-images-prod ]
+    needs: [ tag, promote-images ]
     uses: ./.github/workflows/_push-to-acr.yml
     with:
       client_id: ${{ vars.AZURE_PROD_CLIENT_ID }}
@@ -1064,11 +985,6 @@ jobs:
   trigger-custom-extensions-build-and-wait:
     needs: [ check-permissions, tag ]
     runs-on: ubuntu-22.04
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     steps:
       - name: Set PR's status to pending and request a remote CI test
         run: |
@@ -1141,82 +1057,15 @@ jobs:
           exit 1

   deploy:
-    needs: [ check-permissions, promote-images-prod, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
+    needs: [ check-permissions, promote-images, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
     # `!failure() && !cancelled()` is required because the workflow depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
     if: (github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute') && !failure() && !cancelled()
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
     runs-on: [ self-hosted, small ]
     container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
     steps:
       - uses: actions/checkout@v4

-      - name: Create git tag and GitHub release
-        if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-        uses: actions/github-script@v7
-        with:
-          retries: 5
-          script: |
-            const tag = "${{ needs.tag.outputs.build-tag }}";
-
-            try {
-              const existingRef = await github.rest.git.getRef({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                ref: `tags/${tag}`,
-              });
-
-              if (existingRef.data.object.sha !== context.sha) {
-                throw new Error(`Tag ${tag} already exists but points to a different commit (expected: ${context.sha}, actual: ${existingRef.data.object.sha}).`);
-              }
-
-              console.log(`Tag ${tag} already exists and points to ${context.sha} as expected.`);
-            } catch (error) {
-              if (error.status !== 404) {
-                throw error;
-              }
-
-              console.log(`Tag ${tag} does not exist. Creating it...`);
-              await github.rest.git.createRef({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                ref: `refs/tags/${tag}`,
-                sha: context.sha,
-              });
-              console.log(`Tag ${tag} created successfully.`);
-            }
-
-            // TODO: check how GitHub releases looks for proxy/compute releases and enable them if they're ok
-            if (context.ref !== 'refs/heads/release') {
-              console.log(`GitHub release skipped for ${context.ref}.`);
-              return;
-            }
-
-            try {
-              const existingRelease = await github.rest.repos.getReleaseByTag({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                tag: tag,
-              });
-
-              console.log(`Release for tag ${tag} already exists (ID: ${existingRelease.data.id}).`);
-            } catch (error) {
-              if (error.status !== 404) {
-                throw error;
-              }
-
-              console.log(`Release for tag ${tag} does not exist. Creating it...`);
-              await github.rest.repos.createRelease({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                tag_name: tag,
-                generate_release_notes: true,
-              });
-              console.log(`Release for tag ${tag} created successfully.`);
-            }
-
       - name: Trigger deploy workflow
         env:
           GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
@@ -1266,13 +1115,38 @@ jobs:
             exit 1
           fi

+      - name: Create git tag
+        if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
+        uses: actions/github-script@v7
+        with:
+          # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
+          retries: 5
+          script: |
+            await github.rest.git.createRef({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              ref: "refs/tags/${{ needs.tag.outputs.build-tag }}",
+              sha: context.sha,
+            })
+
+      # TODO: check how GitHub releases looks for proxy releases and enable it if it's ok
+      - name: Create GitHub release
+        if: github.ref_name == 'release'
+        uses: actions/github-script@v7
+        with:
+          # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
+          retries: 5
+          script: |
+            await github.rest.repos.createRelease({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              tag_name: "${{ needs.tag.outputs.build-tag }}",
+              generate_release_notes: true,
+            })
+
   # The job runs on `release` branch and copies compatibility data and Neon artifact from the last *release PR* to the latest directory
   promote-compatibility-data:
     needs: [ deploy ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: read
     # `!failure() && !cancelled()` is required because the workflow transitively depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
     if: github.ref_name == 'release' && !failure() && !cancelled()

@@ -1309,12 +1183,6 @@ jobs:
           echo "run-id=${run_id}" | tee -a ${GITHUB_OUTPUT}
           echo "commit-sha=${last_commit_sha}" | tee -a ${GITHUB_OUTPUT}

-      - uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-
       - name: Promote compatibility snapshot and Neon artifact
         env:
           BUCKET: neon-github-public-dev
@@ -1362,7 +1230,7 @@ jobs:
           done

   pin-build-tools-image:
-    needs: [ build-build-tools-image, promote-images-prod, build-and-test-locally ]
+    needs: [ build-build-tools-image, promote-images, build-and-test-locally ]
     if: github.ref_name == 'main'
     uses: ./.github/workflows/pin-build-tools-image.yml
     with:
@@ -1385,7 +1253,7 @@ jobs:
       - build-and-test-locally
       - check-codestyle-python
       - check-codestyle-rust
-      - promote-images-dev
+      - promote-images
       - test-images
       - trigger-custom-extensions-build-and-wait
     runs-on: ubuntu-22.04

46  .github/workflows/cloud-regress.yml  vendored

@@ -19,21 +19,15 @@ concurrency:
   group: ${{ github.workflow }}
   cancel-in-progress: true

-permissions:
-  id-token: write # aws-actions/configure-aws-credentials
-  statuses: write
-  contents: write

 jobs:
   regress:
     env:
       POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+      DEFAULT_PG_VERSION: 16
       TEST_OUTPUT: /tmp/test_output
       BUILD_TYPE: remote
-    strategy:
-      fail-fast: false
-      matrix:
-        pg-version: [16, 17]
-
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
     runs-on: us-east-2
     container:
@@ -46,11 +40,9 @@ jobs:
           submodules: true

       - name: Patch the test
-        env:
-          PG_VERSION: ${{matrix.pg-version}}
         run: |
-          cd "vendor/postgres-v${PG_VERSION}"
-          patch -p1 < "../../compute/patches/cloud_regress_pg${PG_VERSION}.patch"
+          cd "vendor/postgres-v${DEFAULT_PG_VERSION}"
+          patch -p1 < "../../compute/patches/cloud_regress_pg${DEFAULT_PG_VERSION}.patch"

       - name: Generate a random password
         id: pwgen
@@ -63,9 +55,8 @@ jobs:
       - name: Change tests according to the generated password
         env:
           DBPASS: ${{ steps.pwgen.outputs.DBPASS }}
-          PG_VERSION: ${{matrix.pg-version}}
         run: |
-          cd vendor/postgres-v"${PG_VERSION}"/src/test/regress
+          cd vendor/postgres-v"${DEFAULT_PG_VERSION}"/src/test/regress
           for fname in sql/*.sql expected/*.out; do
             sed -i.bak s/NEON_PASSWORD_PLACEHOLDER/"'${DBPASS}'"/ "${fname}"
           done
@@ -81,46 +72,27 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

-      - name: Create a new branch
-        id: create-branch
-        uses: ./.github/actions/neon-branch-create
-        with:
-          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
-          project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
-
       - name: Run the regression tests
         uses: ./.github/actions/run-python-test-set
         with:
           build_type: ${{ env.BUILD_TYPE }}
           test_selection: cloud_regress
-          pg_version: ${{matrix.pg-version}}
+          pg_version: ${{ env.DEFAULT_PG_VERSION }}
           extra_params: -m remote_cluster
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
-          BENCHMARK_CONNSTR: ${{steps.create-branch.outputs.dsn}}
+          BENCHMARK_CONNSTR: ${{ secrets.PG_REGRESS_CONNSTR }}

-      - name: Delete branch
-        if: always()
-        uses: ./.github/actions/neon-branch-delete
-        with:
-          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
-          project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
-          branch_id: ${{steps.create-branch.outputs.branch_id}}
-
       - name: Create Allure report
         id: create-allure-report
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
-        with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
         uses: slackapi/slack-github-action@v1
         with:
-          channel-id: ${{ vars.SLACK_ON_CALL_QA_STAGING_STREAM }}
+          channel-id: "C033QLM5P7D" # on-call-staging-stream
           slack-message: |
             Periodic pg_regress on staging: ${{ job.status }}
             <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>

15  .github/workflows/ingest_benchmark.yml  vendored

@@ -13,7 +13,7 @@ on:
     # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
     - cron: '0 9 * * *' # run once a day, timezone is utc
   workflow_dispatch: # adds ability to run this manually

 defaults:
   run:
     shell: bash -euxo pipefail {0}
@@ -28,7 +28,7 @@ jobs:
     strategy:
       fail-fast: false # allow other variants to continue even if one fails
       matrix:
         target_project: [new_empty_project, large_existing_project]
     permissions:
       contents: write
       statuses: write
@@ -56,7 +56,7 @@ jobs:
         with:
           aws-region: eu-central-1
           role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
           role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role

       - name: Download Neon artifact
         uses: ./.github/actions/download
@@ -64,7 +64,6 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Create Neon Project
         if: ${{ matrix.target_project == 'new_empty_project' }}
@@ -95,7 +94,7 @@ jobs:
           project_id: ${{ vars.BENCHMARK_INGEST_TARGET_PROJECTID }}
           api_key: ${{ secrets.NEON_STAGING_API_KEY }}

       - name: Initialize Neon project
         if: ${{ matrix.target_project == 'large_existing_project' }}
         env:
           BENCHMARK_INGEST_TARGET_CONNSTR: ${{ steps.create-neon-branch-ingest-target.outputs.dsn }}
@@ -123,7 +122,7 @@ jobs:
           ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"
           echo "BENCHMARK_INGEST_TARGET_CONNSTR=${BENCHMARK_INGEST_TARGET_CONNSTR}" >> $GITHUB_ENV

       - name: Invoke pgcopydb
         uses: ./.github/actions/run-python-test-set
         with:
           build_type: remote
@@ -132,7 +131,7 @@ jobs:
           extra_params: -s -m remote_cluster --timeout 86400 -k test_ingest_performance_using_pgcopydb
           pg_version: v16
           save_perf_report: true
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_INGEST_SOURCE_CONNSTR: ${{ secrets.BENCHMARK_INGEST_SOURCE_CONNSTR }}
           TARGET_PROJECT_TYPE: ${{ matrix.target_project }}
@@ -144,7 +143,7 @@ jobs:
         run: |
           export LD_LIBRARY_PATH=${PG_16_LIB_PATH}
           ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "\dt+"

       - name: Delete Neon Project
         if: ${{ always() && matrix.target_project == 'new_empty_project' }}
         uses: ./.github/actions/neon-project-delete

152  .github/workflows/neon_extra_builds.yml  vendored

@@ -31,15 +31,19 @@ jobs:
     uses: ./.github/workflows/build-build-tools-image.yml
     secrets: inherit

-  files-changed:
-    name: Detect what files changed
-    runs-on: ubuntu-22.04
-    timeout-minutes: 3
-    outputs:
-      v17: ${{ steps.files_changed.outputs.v17 }}
-      postgres_changes: ${{ steps.postgres_changes.outputs.changes }}
-      rebuild_rust_code: ${{ steps.files_changed.outputs.rust_code }}
-      rebuild_everything: ${{ steps.files_changed.outputs.rebuild_neon_extra || steps.files_changed.outputs.rebuild_macos }}
+  check-macos-build:
+    needs: [ check-permissions ]
+    if: |
+      contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
+      contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
+      github.ref_name == 'main'
+    timeout-minutes: 90
+    runs-on: macos-15
+
+    env:
+      # Use release build only, to have less debug info around
+      # Hence keeping target/ (and general cache size) smaller
+      BUILD_TYPE: release

     steps:
       - name: Checkout
@@ -47,45 +51,102 @@ jobs:
       with:
         submodules: true

-      - name: Check for Postgres changes
-        uses: dorny/paths-filter@1441771bbfdd59dcd748680ee64ebd8faab1a242 #v3
-        id: files_changed
+      - name: Install macOS postgres dependencies
+        run: brew install flex bison openssl protobuf icu4c
+
+      - name: Set pg 14 revision for caching
+        id: pg_v14_rev
+        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
+
+      - name: Set pg 15 revision for caching
+        id: pg_v15_rev
+        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
+
+      - name: Set pg 16 revision for caching
+        id: pg_v16_rev
+        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v16) >> $GITHUB_OUTPUT
+
+      - name: Set pg 17 revision for caching
+        id: pg_v17_rev
+        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) >> $GITHUB_OUTPUT
+
+      - name: Cache postgres v14 build
+        id: cache_pg_14
+        uses: actions/cache@v4
         with:
-          token: ${{ github.token }}
-          filters: .github/file-filters.yaml
-          base: ${{ github.event_name != 'pull_request' && (github.event.merge_group.base_ref || github.ref_name) || '' }}
-          ref: ${{ github.event_name != 'pull_request' && (github.event.merge_group.head_ref || github.ref) || '' }}
+          path: pg_install/v14
+          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

-      - name: Filter out only v-string for build matrix
-        id: postgres_changes
+      - name: Cache postgres v15 build
+        id: cache_pg_15
+        uses: actions/cache@v4
+        with:
+          path: pg_install/v15
+          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
+
+      - name: Cache postgres v16 build
+        id: cache_pg_16
+        uses: actions/cache@v4
+        with:
+          path: pg_install/v16
+          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
+
+      - name: Cache postgres v17 build
+        id: cache_pg_17
+        uses: actions/cache@v4
+        with:
+          path: pg_install/v17
+          key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v17_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
+
+      - name: Set extra env for macOS
         run: |
-          v_strings_only_as_json_array=$(echo ${{ steps.files_changed.outputs.chnages }} | jq '.[]|select(test("v\\d+"))' | jq --slurp -c)
-          echo "changes=${v_strings_only_as_json_array}" | tee -a "${GITHUB_OUTPUT}"
+          echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
+          echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV

-  check-macos-build:
-    needs: [ check-permissions, files-changed ]
-    if: |
-      contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
-      contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
-      github.ref_name == 'main'
-    uses: ./.github/workflows/build-macos.yml
-    with:
-      pg_versions: ${{ needs.files-changed.outputs.postgres_changes }}
-      rebuild_rust_code: ${{ needs.files-changed.outputs.rebuild_rust_code }}
-      rebuild_everything: ${{ fromJson(needs.files-changed.outputs.rebuild_everything) }}
+      - name: Cache cargo deps
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cargo/registry
+            !~/.cargo/registry/src
+            ~/.cargo/git
+            target
+          key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
+
+      - name: Build postgres v14
+        if: steps.cache_pg_14.outputs.cache-hit != 'true'
+        run: make postgres-v14 -j$(sysctl -n hw.ncpu)
+
+      - name: Build postgres v15
+        if: steps.cache_pg_15.outputs.cache-hit != 'true'
+        run: make postgres-v15 -j$(sysctl -n hw.ncpu)
+
+      - name: Build postgres v16
+        if: steps.cache_pg_16.outputs.cache-hit != 'true'
+        run: make postgres-v16 -j$(sysctl -n hw.ncpu)
+
+      - name: Build postgres v17
+        if: steps.cache_pg_17.outputs.cache-hit != 'true'
+        run: make postgres-v17 -j$(sysctl -n hw.ncpu)
+
+      - name: Build neon extensions
+        run: make neon-pg-ext -j$(sysctl -n hw.ncpu)
+
+      - name: Build walproposer-lib
+        run: make walproposer-lib -j$(sysctl -n hw.ncpu)
+
+      - name: Run cargo build
+        run: PQ_LIB_DIR=$(pwd)/pg_install/v16/lib cargo build --all --release
+
+      - name: Check that no warnings are produced
+        run: ./run_clippy.sh

   gather-rust-build-stats:
-    needs: [ check-permissions, build-build-tools-image, files-changed ]
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
+    needs: [ check-permissions, build-build-tools-image ]
     if: |
-      (needs.files-changed.outputs.v17 == 'true' || needs.files-changed.outputs.rebuild_everything == 'true') && (
-      contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
-      contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
-      github.ref_name == 'main'
-      )
+      contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
+      contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
+      github.ref_name == 'main'
     runs-on: [ self-hosted, large ]
     container:
       image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
@@ -116,18 +177,13 @@ jobs:
       - name: Produce the build stats
         run: PQ_LIB_DIR=$(pwd)/pg_install/v17/lib cargo build --all --release --timings -j$(nproc)

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
-
       - name: Upload the build stats
         id: upload-stats
         env:
           BUCKET: neon-github-public-dev
           SHA: ${{ github.event.pull_request.head.sha || github.sha }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
         run: |
           REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
           aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"

27  .github/workflows/periodic_pagebench.yml  vendored

@@ -27,11 +27,6 @@ concurrency:

 jobs:
   trigger_bench_on_ec2_machine_in_eu_central_1:
-    permissions:
-      id-token: write # aws-actions/configure-aws-credentials
-      statuses: write
-      contents: write
-      pull-requests: write
     runs-on: [ self-hosted, small ]
     container:
       image: neondatabase/build-tools:pinned-bookworm
@@ -43,6 +38,8 @@ jobs:
     env:
       API_KEY: ${{ secrets.PERIODIC_PAGEBENCH_EC2_RUNNER_API_KEY }}
       RUN_ID: ${{ github.run_id }}
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY : ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_SECRET }}
       AWS_DEFAULT_REGION : "eu-central-1"
       AWS_INSTANCE_ID : "i-02a59a3bf86bc7e74"
     steps:
@@ -53,13 +50,6 @@ jobs:
       - name: Show my own (github runner) external IP address - usefull for IP allowlisting
         run: curl https://ifconfig.me

-      - name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
-        uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
-          role-duration-seconds: 3600
-
       - name: Start EC2 instance and wait for the instance to boot up
         run: |
           aws ec2 start-instances --instance-ids $AWS_INSTANCE_ID
@@ -134,10 +124,11 @@ jobs:
           cat "test_log_${GITHUB_RUN_ID}"

       - name: Create Allure report
+        env:
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
         if: ${{ !cancelled() }}
         uses: ./.github/actions/allure-report-generate
-        with:
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
@@ -157,14 +148,6 @@ jobs:
            -H "Authorization: Bearer $API_KEY" \
            -d ''

-      - name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
-        if: always() && steps.poll_step.outputs.too_many_runs != 'true'
-        uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
-          role-duration-seconds: 3600
-
       - name: Stop EC2 instance and wait for the instance to be stopped
         if: always() && steps.poll_step.outputs.too_many_runs != 'true'
         run: |

12  .github/workflows/pg-clients.yml  vendored

@@ -25,13 +25,11 @@ defaults:
   run:
     shell: bash -euxo pipefail {0}

-permissions:
-  id-token: write # aws-actions/configure-aws-credentials
-  statuses: write # require for posting a status update
-
 env:
   DEFAULT_PG_VERSION: 16
   PLATFORM: neon-captest-new
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
   AWS_DEFAULT_REGION: eu-central-1

 jobs:
@@ -96,7 +94,6 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Create Neon Project
         id: create-neon-project
@@ -113,7 +110,6 @@ jobs:
           run_in_parallel: false
           extra_params: -m remote_cluster
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}

@@ -130,7 +126,6 @@ jobs:
         uses: ./.github/actions/allure-report-generate
         with:
           store-test-results-into-db: true
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

@@ -164,7 +159,6 @@ jobs:
           name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
           path: /tmp/neon/
           prefix: latest
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

       - name: Create Neon Project
         id: create-neon-project
@@ -181,7 +175,6 @@ jobs:
           run_in_parallel: false
           extra_params: -m remote_cluster
           pg_version: ${{ env.DEFAULT_PG_VERSION }}
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}

@@ -198,7 +191,6 @@ jobs:
         uses: ./.github/actions/allure-report-generate
         with:
           store-test-results-into-db: true
-          aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
         env:
           REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

14  .github/workflows/pin-build-tools-image.yml  vendored

@@ -67,7 +67,7 @@ jobs:
     runs-on: ubuntu-22.04

     permissions:
-      id-token: write # for `azure/login` and aws auth
+      id-token: write # for `azure/login`

     steps:
       - uses: docker/login-action@v3
@@ -75,15 +75,11 @@ jobs:
           username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
           password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
+      - uses: docker/login-action@v3
         with:
-          aws-region: eu-central-1
-          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-          role-duration-seconds: 3600
+          registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+          username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          password: ${{ secrets.AWS_SECRET_KEY_DEV }}

-      - name: Login to Amazon Dev ECR
-        uses: aws-actions/amazon-ecr-login@v2
-
       - name: Azure login
         uses: azure/login@6c251865b4e6290e7b78be643ea2d005bc51f69a # @v2.1.1

1  .github/workflows/pre-merge-checks.yml  vendored

@@ -63,7 +63,6 @@ jobs:
     if: always()
     permissions:
       statuses: write # for `github.repos.createCommitStatus(...)`
-      contents: write
     needs:
       - get-changed-files
       - check-codestyle-python

4  .github/workflows/release.yml  vendored

@@ -3,7 +3,7 @@ name: Create Release Branch
 on:
   schedule:
     # It should be kept in sync with if-condition in jobs
-    - cron: '0 6 * * FRI' # Storage release
+    - cron: '0 6 * * MON' # Storage release
     - cron: '0 6 * * THU' # Proxy release
   workflow_dispatch:
     inputs:
@@ -29,7 +29,7 @@ defaults:

 jobs:
   create-storage-release-branch:
-    if: ${{ github.event.schedule == '0 6 * * FRI' || inputs.create-storage-release-branch }}
+    if: ${{ github.event.schedule == '0 6 * * MON' || inputs.create-storage-release-branch }}

     permissions:
       contents: write

8  .github/workflows/trigger-e2e-tests.yml  vendored

@@ -68,7 +68,7 @@ jobs:
       GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
       TAG: ${{ needs.tag.outputs.build-tag }}
     steps:
-      - name: Wait for `promote-images-dev` job to finish
+      - name: Wait for `promote-images` job to finish
         # It's important to have a timeout here, the script in the step can run infinitely
         timeout-minutes: 60
         run: |
@@ -79,17 +79,17 @@ jobs:
           # For PRs we use the run id as the tag
           BUILD_AND_TEST_RUN_ID=${TAG}
           while true; do
-            conclusion=$(gh run --repo ${GITHUB_REPOSITORY} view ${BUILD_AND_TEST_RUN_ID} --json jobs --jq '.jobs[] | select(.name == "promote-images-dev") | .conclusion')
+            conclusion=$(gh run --repo ${GITHUB_REPOSITORY} view ${BUILD_AND_TEST_RUN_ID} --json jobs --jq '.jobs[] | select(.name == "promote-images") | .conclusion')
             case "$conclusion" in
               success)
                 break
                 ;;
               failure | cancelled | skipped)
-                echo "The 'promote-images-dev' job didn't succeed: '${conclusion}'. Exiting..."
+                echo "The 'promote-images' job didn't succeed: '${conclusion}'. Exiting..."
                 exit 1
                 ;;
               *)
-                echo "The 'promote-images-dev' hasn't succeed yet. Waiting..."
+                echo "The 'promote-images' hasn't succeed yet. Waiting..."
                 sleep 60
                 ;;
             esac

33  CODEOWNERS

@@ -1,29 +1,16 @@
-# Autoscaling
-/libs/vm_monitor/ @neondatabase/autoscaling
-
-# DevProd
 /.github/ @neondatabase/developer-productivity
+/compute_tools/ @neondatabase/control-plane @neondatabase/compute
-# Compute
-/pgxn/ @neondatabase/compute
-/vendor/ @neondatabase/compute
-/compute/ @neondatabase/compute
-/compute_tools/ @neondatabase/compute
-
-# Proxy
+/libs/pageserver_api/ @neondatabase/storage
+/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
 /libs/proxy/ @neondatabase/proxy
-/proxy/ @neondatabase/proxy
-# Storage
+/libs/remote_storage/ @neondatabase/storage
+/libs/safekeeper_api/ @neondatabase/storage
+/libs/vm_monitor/ @neondatabase/autoscaling
 /pageserver/ @neondatabase/storage
+/pgxn/ @neondatabase/compute
+/pgxn/neon/ @neondatabase/compute @neondatabase/storage
+/proxy/ @neondatabase/proxy
 /safekeeper/ @neondatabase/storage
 /storage_controller @neondatabase/storage
 /storage_scrubber @neondatabase/storage
-/libs/pageserver_api/ @neondatabase/storage
-/libs/remote_storage/ @neondatabase/storage
-/libs/safekeeper_api/ @neondatabase/storage
-
-# Shared
-/pgxn/neon/ @neondatabase/compute @neondatabase/storage
-/libs/compute_api/ @neondatabase/compute @neondatabase/control-plane
-/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
+/vendor/ @neondatabase/compute

751  Cargo.lock  generated
File diff suppressed because it is too large
23
Cargo.toml
23
Cargo.toml
@@ -11,7 +11,6 @@ members = [
 "pageserver/pagebench",
 "proxy",
 "safekeeper",
-"safekeeper/client",
 "storage_broker",
 "storage_controller",
 "storage_controller/client",
@@ -52,7 +51,10 @@ anyhow = { version = "1.0", features = ["backtrace"] }
 arc-swap = "1.6"
 async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] }
 atomic-take = "1.1.0"
-backtrace = "0.3.74"
+azure_core = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
+azure_identity = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
+azure_storage = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
+azure_storage_blobs = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
 flate2 = "1.0.26"
 async-stream = "0.3"
 async-trait = "0.1"
@@ -65,7 +67,7 @@ aws-smithy-types = "1.2"
 aws-credential-types = "1.2.0"
 aws-sigv4 = { version = "1.2", features = ["sign-http"] }
 aws-types = "1.3"
-axum = { version = "0.7.9", features = ["ws"] }
+axum = { version = "0.7.5", features = ["ws"] }
 base64 = "0.13.0"
 bincode = "1.3"
 bindgen = "0.70"
@@ -135,7 +137,7 @@ parquet = { version = "53", default-features = false, features = ["zstd"] }
 parquet_derive = "53"
 pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
 pin-project-lite = "0.2"
-pprof = { version = "0.14", features = ["criterion", "flamegraph", "frame-pointer", "protobuf", "protobuf-codec"] }
+pprof = { version = "0.14", features = ["criterion", "flamegraph", "protobuf", "protobuf-codec"] }
 procfs = "0.16"
 prometheus = {version = "0.13", default-features=false, features = ["process"]} # removes protobuf dependency
 prost = "0.13"
@@ -187,9 +189,7 @@ tokio-util = { version = "0.7.10", features = ["io", "rt"] }
 toml = "0.8"
 toml_edit = "0.22"
 tonic = {version = "0.12.3", features = ["tls", "tls-roots"]}
-tower = { version = "0.5.2", default-features = false }
-tower-http = { version = "0.6.2", features = ["request-id", "trace"] }
-tower-service = "0.3.3"
+tower-service = "0.3.2"
 tracing = "0.1"
 tracing-error = "0.2"
 tracing-opentelemetry = "0.27"
@@ -216,12 +216,6 @@ postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git",
 postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
 tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
 
-## Azure SDK crates
-azure_core = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
-azure_identity = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
-azure_storage = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
-azure_storage_blobs = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
-
 ## Local libraries
 compute_api = { version = "0.1", path = "./libs/compute_api/" }
 consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
@@ -237,7 +231,6 @@ postgres_initdb = { path = "./libs/postgres_initdb" }
 pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
 remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
 safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
-safekeeper_client = { path = "./safekeeper/client" }
 desim = { version = "0.1", path = "./libs/desim" }
 storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
 storage_controller_client = { path = "./storage_controller/client" }
@@ -268,8 +261,6 @@ tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", br
 [profile.release]
 # This is useful for profiling and, to some extent, debug.
 # Besides, debug info should not affect the performance.
-#
-# NB: we also enable frame pointers for improved profiling, see .cargo/config.toml.
 debug = true
 
 # disable debug symbols for all packages except this one to decrease binaries size
@@ -45,7 +45,7 @@ COPY --chown=nonroot . .
 
 ARG ADDITIONAL_RUSTFLAGS
 RUN set -e \
-&& PQ_LIB_DIR=$(pwd)/pg_install/v${STABLE_PG_VERSION}/lib RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment -Cforce-frame-pointers=yes ${ADDITIONAL_RUSTFLAGS}" cargo build \
+&& PQ_LIB_DIR=$(pwd)/pg_install/v${STABLE_PG_VERSION}/lib RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment ${ADDITIONAL_RUSTFLAGS}" cargo build \
 --bin pg_sni_router \
 --bin pageserver \
 --bin pagectl \
@@ -69,8 +69,6 @@ RUN set -e \
 libreadline-dev \
 libseccomp-dev \
 ca-certificates \
-# System postgres for use with client libraries (e.g. in storage controller)
-postgresql-15 \
 && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
 && useradd -d /data neon \
 && chown -R neon:neon /data
@@ -103,6 +101,11 @@ RUN mkdir -p /data/.neon/ && \
 > /data/.neon/pageserver.toml && \
 chown -R neon:neon /data/.neon
 
+# When running a binary that links with libpq, default to using our most recent postgres version. Binaries
+# that want a particular postgres version will select it explicitly: this is just a default.
+ENV LD_LIBRARY_PATH=/usr/local/v${DEFAULT_PG_VERSION}/lib
+
+
 VOLUME ["/data"]
 USER neon
 EXPOSE 6400
@@ -115,7 +115,7 @@ RUN set -e \
 
 # Keep the version the same as in compute/compute-node.Dockerfile and
 # test_runner/regress/test_compute_metrics.py.
-ENV SQL_EXPORTER_VERSION=0.16.0
+ENV SQL_EXPORTER_VERSION=0.13.1
 RUN curl -fsSL \
 "https://github.com/burningalchemist/sql_exporter/releases/download/${SQL_EXPORTER_VERSION}/sql_exporter-${SQL_EXPORTER_VERSION}.linux-$(case "$(uname -m)" in x86_64) echo amd64;; aarch64) echo arm64;; esac).tar.gz" \
 --output sql_exporter.tar.gz \
@@ -258,7 +258,7 @@ WORKDIR /home/nonroot
 
 # Rust
 # Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`)
-ENV RUSTC_VERSION=1.84.0
+ENV RUSTC_VERSION=1.83.0
 ENV RUSTUP_HOME="/home/nonroot/.rustup"
 ENV PATH="/home/nonroot/.cargo/bin:${PATH}"
 ARG RUSTFILT_VERSION=0.2.1
@@ -35,12 +35,10 @@ RUN case $DEBIAN_VERSION in \
 ;; \
 esac && \
 apt update && \
-apt install --no-install-recommends --no-install-suggests -y \
-ninja-build git autoconf automake libtool build-essential bison flex libreadline-dev \
+apt install --no-install-recommends -y git autoconf automake libtool build-essential bison flex libreadline-dev \
 zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget ca-certificates pkg-config libssl-dev \
 libicu-dev libxslt1-dev liblz4-dev libzstd-dev zstd \
-$VERSION_INSTALLS \
-&& apt clean && rm -rf /var/lib/apt/lists/*
+$VERSION_INSTALLS
 
 #########################################################################################
 #
@@ -115,12 +113,10 @@ ARG DEBIAN_VERSION
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 RUN apt update && \
-apt install --no-install-recommends --no-install-suggests -y \
-gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
+apt install --no-install-recommends -y gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
 libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
 libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
-protobuf-c-compiler xsltproc \
-&& apt clean && rm -rf /var/lib/apt/lists/*
+protobuf-c-compiler xsltproc
 
 
 # Postgis 3.5.0 requires SFCGAL 1.4+
@@ -147,9 +143,9 @@ RUN case "${DEBIAN_VERSION}" in \
 wget https://gitlab.com/sfcgal/SFCGAL/-/archive/v${SFCGAL_VERSION}/SFCGAL-v${SFCGAL_VERSION}.tar.gz -O SFCGAL.tar.gz && \
 echo "${SFCGAL_CHECKSUM} SFCGAL.tar.gz" | sha256sum --check && \
 mkdir sfcgal-src && cd sfcgal-src && tar xzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
-cmake -DCMAKE_BUILD_TYPE=Release -GNinja . && ninja -j $(getconf _NPROCESSORS_ONLN) && \
-DESTDIR=/sfcgal ninja install -j $(getconf _NPROCESSORS_ONLN) && \
-ninja clean && cp -R /sfcgal/* /
+cmake -DCMAKE_BUILD_TYPE=Release . && make -j $(getconf _NPROCESSORS_ONLN) && \
+DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
+make clean && cp -R /sfcgal/* /
 
 ENV PATH="/usr/local/pgsql/bin:$PATH"
 
@@ -217,9 +213,9 @@ RUN case "${PG_VERSION}" in \
 echo "${PGROUTING_CHECKSUM} pgrouting.tar.gz" | sha256sum --check && \
 mkdir pgrouting-src && cd pgrouting-src && tar xzf ../pgrouting.tar.gz --strip-components=1 -C . && \
 mkdir build && cd build && \
-cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. && \
-ninja -j $(getconf _NPROCESSORS_ONLN) && \
-ninja -j $(getconf _NPROCESSORS_ONLN) install && \
+cmake -DCMAKE_BUILD_TYPE=Release .. && \
+make -j $(getconf _NPROCESSORS_ONLN) && \
+make -j $(getconf _NPROCESSORS_ONLN) install && \
 echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control && \
 find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\
 cp /usr/local/pgsql/share/extension/pgrouting.control /extensions/postgis && \
@@ -239,9 +235,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY compute/patches/plv8-3.1.10.patch /plv8-3.1.10.patch
 
 RUN apt update && \
-apt install --no-install-recommends --no-install-suggests -y \
-ninja-build python3-dev libncurses5 binutils clang \
-&& apt clean && rm -rf /var/lib/apt/lists/*
+apt install --no-install-recommends -y ninja-build python3-dev libncurses5 binutils clang
 
 # plv8 3.2.3 supports v17
 # last release v3.2.3 - Sep 7, 2024
@@ -307,10 +301,9 @@ RUN mkdir -p /h3/usr/ && \
 echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \
 mkdir h3-src && cd h3-src && tar xzf ../h3.tar.gz --strip-components=1 -C . && \
 mkdir build && cd build && \
-cmake .. -GNinja -DBUILD_BENCHMARKS=0 -DCMAKE_BUILD_TYPE=Release \
--DBUILD_FUZZERS=0 -DBUILD_FILTERS=0 -DBUILD_GENERATORS=0 -DBUILD_TESTING=0 \
-&& ninja -j $(getconf _NPROCESSORS_ONLN) && \
-DESTDIR=/h3 ninja install && \
+cmake .. -DCMAKE_BUILD_TYPE=Release && \
+make -j $(getconf _NPROCESSORS_ONLN) && \
+DESTDIR=/h3 make install && \
 cp -R /h3/usr / && \
 rm -rf build
 
@@ -657,15 +650,14 @@ FROM build-deps AS rdkit-pg-build
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 
-RUN apt update && \
-apt install --no-install-recommends --no-install-suggests -y \
+RUN apt-get update && \
+apt-get install --no-install-recommends -y \
 libboost-iostreams1.74-dev \
 libboost-regex1.74-dev \
 libboost-serialization1.74-dev \
 libboost-system1.74-dev \
 libeigen3-dev \
-libboost-all-dev \
-&& apt clean && rm -rf /var/lib/apt/lists/*
+libboost-all-dev
 
 # rdkit Release_2024_09_1 supports v17
 # last release Release_2024_09_1 - Sep 27, 2024
@@ -701,8 +693,6 @@ RUN case "${PG_VERSION}" in \
 -D RDK_BUILD_MOLINTERCHANGE_SUPPORT=OFF \
 -D RDK_BUILD_YAEHMOP_SUPPORT=OFF \
 -D RDK_BUILD_STRUCTCHECKER_SUPPORT=OFF \
--D RDK_TEST_MULTITHREADED=OFF \
--D RDK_BUILD_CPP_TESTS=OFF \
 -D RDK_USE_URF=OFF \
 -D RDK_BUILD_PGSQL=ON \
 -D RDK_PGSQL_STATIC=ON \
@@ -714,10 +704,9 @@ RUN case "${PG_VERSION}" in \
 -D RDK_INSTALL_COMIC_FONTS=OFF \
 -D RDK_BUILD_FREETYPE_SUPPORT=OFF \
 -D CMAKE_BUILD_TYPE=Release \
--GNinja \
 . && \
-ninja -j $(getconf _NPROCESSORS_ONLN) && \
-ninja -j $(getconf _NPROCESSORS_ONLN) install && \
+make -j $(getconf _NPROCESSORS_ONLN) && \
+make -j $(getconf _NPROCESSORS_ONLN) install && \
 echo 'trusted = true' >> /usr/local/pgsql/share/extension/rdkit.control
 
 #########################################################################################
@@ -860,9 +849,8 @@ FROM build-deps AS rust-extensions-build
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 
-RUN apt update && \
-apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
-apt clean && rm -rf /var/lib/apt/lists/* && \
+RUN apt-get update && \
+apt-get install --no-install-recommends -y curl libclang-dev && \
 useradd -ms /bin/bash nonroot -b /home
 
 ENV HOME=/home/nonroot
@@ -897,9 +885,8 @@ FROM build-deps AS rust-extensions-build-pgrx12
 ARG PG_VERSION
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 
-RUN apt update && \
-apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
-apt clean && rm -rf /var/lib/apt/lists/* && \
+RUN apt-get update && \
+apt-get install --no-install-recommends -y curl libclang-dev && \
 useradd -ms /bin/bash nonroot -b /home
 
 ENV HOME=/home/nonroot
@@ -927,22 +914,18 @@ FROM rust-extensions-build-pgrx12 AS pg-onnx-build
 
 # cmake 3.26 or higher is required, so installing it using pip (bullseye-backports has cmake 3.25).
 # Install it using virtual environment, because Python 3.11 (the default version on Debian 12 (Bookworm)) complains otherwise
-RUN apt update && apt install --no-install-recommends --no-install-suggests -y \
-python3 python3-pip python3-venv && \
-apt clean && rm -rf /var/lib/apt/lists/* && \
+RUN apt-get update && apt-get install -y python3 python3-pip python3-venv && \
 python3 -m venv venv && \
 . venv/bin/activate && \
 python3 -m pip install cmake==3.30.5 && \
 wget https://github.com/microsoft/onnxruntime/archive/refs/tags/v1.18.1.tar.gz -O onnxruntime.tar.gz && \
 mkdir onnxruntime-src && cd onnxruntime-src && tar xzf ../onnxruntime.tar.gz --strip-components=1 -C . && \
-./build.sh --config Release --parallel --cmake_generator Ninja \
---skip_submodule_sync --skip_tests --allow_running_as_root
+./build.sh --config Release --parallel --skip_submodule_sync --skip_tests --allow_running_as_root
 
 
 FROM pg-onnx-build AS pgrag-pg-build
 
-RUN apt update && apt install --no-install-recommends --no-install-suggests -y protobuf-compiler \
-&& apt clean && rm -rf /var/lib/apt/lists/* && \
+RUN apt-get install -y protobuf-compiler && \
 wget https://github.com/neondatabase-labs/pgrag/archive/refs/tags/v0.0.0.tar.gz -O pgrag.tar.gz && \
 echo "2cbe394c1e74fc8bcad9b52d5fbbfb783aef834ca3ce44626cfd770573700bb4 pgrag.tar.gz" | sha256sum --check && \
 mkdir pgrag-src && cd pgrag-src && tar xzf ../pgrag.tar.gz --strip-components=1 -C . && \
@@ -1185,25 +1168,6 @@ RUN case "${PG_VERSION}" in \
 make BUILD_TYPE=release -j $(getconf _NPROCESSORS_ONLN) install && \
 echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_mooncake.control
 
-#########################################################################################
-#
-# Layer "pg_repack"
-# compile pg_repack extension
-#
-#########################################################################################
-
-FROM build-deps AS pg-repack-build
-ARG PG_VERSION
-COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
-
-ENV PATH="/usr/local/pgsql/bin/:$PATH"
-
-RUN wget https://github.com/reorg/pg_repack/archive/refs/tags/ver_1.5.2.tar.gz -O pg_repack.tar.gz && \
-echo '4516cad42251ed3ad53ff619733004db47d5755acac83f75924cd94d1c4fb681 pg_repack.tar.gz' | sha256sum --check && \
-mkdir pg_repack-src && cd pg_repack-src && tar xzf ../pg_repack.tar.gz --strip-components=1 -C . && \
-make -j $(getconf _NPROCESSORS_ONLN) && \
-make -j $(getconf _NPROCESSORS_ONLN) install
-
 #########################################################################################
 #
 # Layer "neon-pg-ext-build"
@@ -1249,7 +1213,6 @@ COPY --from=pg-anon-pg-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY --from=pg-ivm-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY --from=pg-partman-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY --from=pg-mooncake-build /usr/local/pgsql/ /usr/local/pgsql/
-COPY --from=pg-repack-build /usr/local/pgsql/ /usr/local/pgsql/
 COPY pgxn/ pgxn/
 
 RUN make -j $(getconf _NPROCESSORS_ONLN) \
@@ -1285,7 +1248,7 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) \
 
 #########################################################################################
 #
-# Compile the Neon-specific `compute_ctl`, `fast_import`, and `local_proxy` binaries
+# Compile and run the Neon-specific `compute_ctl` and `fast_import` binaries
 #
 #########################################################################################
 FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
@@ -1295,7 +1258,7 @@ ENV BUILD_TAG=$BUILD_TAG
 USER nonroot
 # Copy entire project to get Cargo.* files with proper dependencies for the whole project
 COPY --chown=nonroot . .
-RUN mold -run cargo build --locked --profile release-line-debug-size-lto --bin compute_ctl --bin fast_import --bin local_proxy
+RUN cd compute_tools && mold -run cargo build --locked --profile release-line-debug-size-lto
 
 #########################################################################################
 #
@@ -1316,8 +1279,8 @@ COPY --from=compute-tools /home/nonroot/target/release-line-debug-size-lto/fast_
 
 FROM debian:$DEBIAN_FLAVOR AS pgbouncer
 RUN set -e \
-&& apt update \
-&& apt install --no-install-suggests --no-install-recommends -y \
+&& apt-get update \
+&& apt-get install --no-install-recommends -y \
 build-essential \
 git \
 ca-certificates \
@@ -1325,8 +1288,7 @@ RUN set -e \
 automake \
 libevent-dev \
 libtool \
-pkg-config \
-&& apt clean && rm -rf /var/lib/apt/lists/*
+pkg-config
 
 # Use `dist_man_MANS=` to skip manpage generation (which requires python3/pandoc)
 ENV PGBOUNCER_TAG=pgbouncer_1_22_1
@@ -1338,6 +1300,20 @@ RUN set -e \
 && make -j $(nproc) dist_man_MANS= \
 && make install dist_man_MANS=
 
+#########################################################################################
+#
+# Compile the Neon-specific `local_proxy` binary
+#
+#########################################################################################
+FROM $REPOSITORY/$IMAGE:$TAG AS local_proxy
+ARG BUILD_TAG
+ENV BUILD_TAG=$BUILD_TAG
+
+USER nonroot
+# Copy entire project to get Cargo.* files with proper dependencies for the whole project
+COPY --chown=nonroot . .
+RUN mold -run cargo build --locked --profile release-line-debug-size-lto --bin local_proxy
+
 #########################################################################################
 #
 # Layers "postgres-exporter" and "sql-exporter"
@@ -1348,7 +1324,7 @@ FROM quay.io/prometheuscommunity/postgres-exporter:v0.12.1 AS postgres-exporter
 
 # Keep the version the same as in build-tools.Dockerfile and
 # test_runner/regress/test_compute_metrics.py.
-FROM burningalchemist/sql_exporter:0.16.0 AS sql-exporter
+FROM burningalchemist/sql_exporter:0.13.1 AS sql-exporter
 
 #########################################################################################
 #
@@ -1477,7 +1453,7 @@ COPY --from=pgbouncer /usr/local/pgbouncer/bin/pgbouncer /usr/local/bin/
 COPY --chmod=0666 --chown=postgres compute/etc/pgbouncer.ini /etc/pgbouncer.ini
 
 # local_proxy and its config
-COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/local_proxy /usr/local/bin/local_proxy
+COPY --from=local_proxy --chown=postgres /home/nonroot/target/release-line-debug-size-lto/local_proxy /usr/local/bin/local_proxy
 RUN mkdir -p /etc/local_proxy && chown postgres:postgres /etc/local_proxy
 
 # Metrics exporter binaries and configuration files
@@ -1542,30 +1518,28 @@ RUN apt update && \
 locales \
 procps \
 ca-certificates \
-curl \
-unzip \
 $VERSION_INSTALLS && \
-apt clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
+rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
 localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
 
-# aws cli is used by fast_import (curl and unzip above are at this time only used for this installation step)
+# s5cmd 2.2.2 from https://github.com/peak/s5cmd/releases/tag/v2.2.2
+# used by fast_import
 ARG TARGETARCH
+ADD https://github.com/peak/s5cmd/releases/download/v2.2.2/s5cmd_2.2.2_linux_$TARGETARCH.deb /tmp/s5cmd.deb
 RUN set -ex; \
+\
+# Determine the expected checksum based on TARGETARCH
 if [ "${TARGETARCH}" = "amd64" ]; then \
-TARGETARCH_ALT="x86_64"; \
-CHECKSUM="c9a9df3770a3ff9259cb469b6179e02829687a464e0824d5c32d378820b53a00"; \
+CHECKSUM="392c385320cd5ffa435759a95af77c215553d967e4b1c0fffe52e4f14c29cf85"; \
 elif [ "${TARGETARCH}" = "arm64" ]; then \
-TARGETARCH_ALT="aarch64"; \
-CHECKSUM="8181730be7891582b38b028112e81b4899ca817e8c616aad807c9e9d1289223a"; \
+CHECKSUM="939bee3cf4b5604ddb00e67f8c157b91d7c7a5b553d1fbb6890fad32894b7b46"; \
 else \
 echo "Unsupported architecture: ${TARGETARCH}"; exit 1; \
 fi; \
-curl -L "https://awscli.amazonaws.com/awscli-exe-linux-${TARGETARCH_ALT}-2.17.5.zip" -o /tmp/awscliv2.zip; \
-echo "${CHECKSUM} /tmp/awscliv2.zip" | sha256sum -c -; \
-unzip /tmp/awscliv2.zip -d /tmp/awscliv2; \
-/tmp/awscliv2/aws/install; \
-rm -rf /tmp/awscliv2.zip /tmp/awscliv2; \
-true
+\
+# Compute and validate the checksum
+echo "${CHECKSUM} /tmp/s5cmd.deb" | sha256sum -c -
+RUN dpkg -i /tmp/s5cmd.deb && rm /tmp/s5cmd.deb
 
 ENV LANG=en_US.utf8
 USER postgres
@@ -3,7 +3,7 @@
 metrics: [
 import 'sql_exporter/checkpoints_req.libsonnet',
 import 'sql_exporter/checkpoints_timed.libsonnet',
-import 'sql_exporter/compute_backpressure_throttling_seconds_total.libsonnet',
+import 'sql_exporter/compute_backpressure_throttling_seconds.libsonnet',
 import 'sql_exporter/compute_current_lsn.libsonnet',
 import 'sql_exporter/compute_logical_snapshot_files.libsonnet',
 import 'sql_exporter/compute_logical_snapshots_bytes.libsonnet',
@@ -19,10 +19,3 @@ max_prepared_statements=0
 admin_users=postgres
 unix_socket_dir=/tmp/
 unix_socket_mode=0777
-
-;; Disable connection logging. It produces a lot of logs that no one looks at,
-;; and we can get similar log entries from the proxy too. We had incidents in
-;; the past where the logging significantly stressed the log device or pgbouncer
-;; itself.
-log_connections=0
-log_disconnections=0
@@ -1,10 +1,10 @@
 {
-metric_name: 'compute_backpressure_throttling_seconds_total',
-type: 'counter',
+metric_name: 'compute_backpressure_throttling_seconds',
+type: 'gauge',
 help: 'Time compute has spent throttled',
 key_labels: null,
 values: [
 'throttled',
 ],
-query: importstr 'sql_exporter/compute_backpressure_throttling_seconds_total.sql',
+query: importstr 'sql_exporter/compute_backpressure_throttling_seconds.sql',
 }
@@ -981,7 +981,7 @@ index fc42d418bf..e38f517574 100644
 CREATE SCHEMA addr_nsp;
 SET search_path TO 'addr_nsp';
 diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out
-index 8475231735..0653946337 100644
+index 8475231735..1afae5395f 100644
 --- a/src/test/regress/expected/password.out
 +++ b/src/test/regress/expected/password.out
 @@ -12,11 +12,11 @@ SET password_encryption = 'md5'; -- ok
@@ -1006,63 +1006,65 @@ index 8475231735..0653946337 100644
 -----------------+---------------------------------------------------
 - regress_passwd1 | md5783277baca28003b33453252be4dbb34
 - regress_passwd2 | md54044304ba511dd062133eb5b4b84a2a3
-+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
++ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
-+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
++ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
 regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
 - regress_passwd4 |
 + regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
 (4 rows)
 
 -- Rename a role
-@@ -54,24 +54,16 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
+@@ -54,24 +54,30 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
 -- passwords.
 SET password_encryption = 'md5';
 -- encrypt with MD5
 -ALTER ROLE regress_passwd2 PASSWORD 'foo';
---- already encrypted, use as they are
--ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
--ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
 +ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- already encrypted, use as they are
+ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
+ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
 SET password_encryption = 'scram-sha-256';
 -- create SCRAM secret
 -ALTER ROLE regress_passwd4 PASSWORD 'foo';
---- already encrypted with MD5, use as it is
--CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
---- This looks like a valid SCRAM-SHA-256 secret, but it is not
---- so it should be hashed with SCRAM-SHA-256.
--CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
---- These may look like valid MD5 secrets, but they are not, so they
---- should be hashed with SCRAM-SHA-256.
---- trailing garbage at the end
--CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
---- invalid length
--CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
 +ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+-- Neon does not support encrypted passwords, use unencrypted instead
+-- already encrypted with MD5, use as it is
-+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
-+-- Neon does not support encrypted passwords, use unencrypted instead
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- This looks like a valid SCRAM-SHA-256 secret, but it is not
-+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- so it should be hashed with SCRAM-SHA-256.
-+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
+-- These may look like valid MD5 secrets, but they are not, so they
+-- should be hashed with SCRAM-SHA-256.
+-- trailing garbage at the end
+CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
+-- invalid length
+CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
 -- Changing the SCRAM iteration count
 SET scram_iterations = 1024;
 CREATE ROLE regress_passwd9 PASSWORD 'alterediterationcount';
-@@ -81,11 +73,11 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
+@@ -81,63 +87,67 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
 ORDER BY rolname, rolpassword;
 rolname | rolpassword_masked
 -----------------+---------------------------------------------------
 - regress_passwd1 | md5cd3578025fe2c3d7ed1b9a9b26238b70
 - regress_passwd2 | md5dfa155cadd5f4ad57860162f3fab9cdb
-+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
++ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
-+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
++ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
 regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
 regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
 - regress_passwd5 | md5e73a4b11df52a6068f8b39f90be36023
-+ regress_passwd5 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+- regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
-regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+- regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
-regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+- regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
-regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
+regress_passwd9 | SCRAM-SHA-256$1024:<salt>$<storedkey>:<serverkey>
-@@ -95,23 +87,20 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
+-(9 rows)
++(5 rows)

 -- An empty password is not allowed, in any form
 CREATE ROLE regress_passwd_empty PASSWORD '';
 NOTICE: empty string is not a valid password, clearing password
@@ -1080,37 +1082,56 @@ index 8475231735..0653946337 100644
 -(1 row)
 +(0 rows)
 
--- Test with invalid stored and server keys.
+-- Test with invalid stored and server keys.
---
+--
--- The first is valid, to act as a control. The others have too long
+-- The first is valid, to act as a control. The others have too long
--- stored/server keys. They will be re-hashed.
+-- stored/server keys. They will be re-hashed.
--CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
+CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
--CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
--CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
+CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-+-- Neon does not support encrypted passwords, use unencrypted instead
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
-+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
++ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
 -- Check that the invalid secrets were re-hashed. A re-hashed secret
 -- should not contain the original salt.
 SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed
-@@ -120,7 +109,7 @@ SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassw
+FROM pg_authid
+WHERE rolname LIKE 'regress_passwd_sha_len%'
 ORDER BY rolname;
-rolname | is_rolpassword_rehashed
+- rolname | is_rolpassword_rehashed
--------------------------+-------------------------
+--------------------------+-------------------------
 - regress_passwd_sha_len0 | f
-+ regress_passwd_sha_len0 | t
+- regress_passwd_sha_len1 | t
-regress_passwd_sha_len1 | t
+- regress_passwd_sha_len2 | t
-regress_passwd_sha_len2 | t
+-(3 rows)
-(3 rows)
++ rolname | is_rolpassword_rehashed
-@@ -135,6 +124,7 @@ DROP ROLE regress_passwd7;
++---------+-------------------------
++(0 rows)

+DROP ROLE regress_passwd1;
+DROP ROLE regress_passwd2;
+DROP ROLE regress_passwd3;
+DROP ROLE regress_passwd4;
+DROP ROLE regress_passwd5;
++ERROR: role "regress_passwd5" does not exist
+DROP ROLE regress_passwd6;
++ERROR: role "regress_passwd6" does not exist
+DROP ROLE regress_passwd7;
++ERROR: role "regress_passwd7" does not exist
 DROP ROLE regress_passwd8;
++ERROR: role "regress_passwd8" does not exist
 DROP ROLE regress_passwd9;
 DROP ROLE regress_passwd_empty;
 +ERROR: role "regress_passwd_empty" does not exist
 DROP ROLE regress_passwd_sha_len0;
++ERROR: role "regress_passwd_sha_len0" does not exist
 DROP ROLE regress_passwd_sha_len1;
++ERROR: role "regress_passwd_sha_len1" does not exist
 DROP ROLE regress_passwd_sha_len2;
++ERROR: role "regress_passwd_sha_len2" does not exist
+-- all entries should have been removed
+SELECT rolname, rolpassword
+FROM pg_authid
 diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
 index 5b9dba7b32..cc408dad42 100644
 --- a/src/test/regress/expected/privileges.out
@@ -3173,7 +3194,7 @@ index 1a6c61f49d..1c31ac6a53 100644
 -- Test generic object addressing/identification functions
 CREATE SCHEMA addr_nsp;
 diff --git a/src/test/regress/sql/password.sql b/src/test/regress/sql/password.sql
-index 53e86b0b6c..0303fdfe96 100644
+index 53e86b0b6c..f07cf1ec54 100644
 --- a/src/test/regress/sql/password.sql
 +++ b/src/test/regress/sql/password.sql
 @@ -10,11 +10,11 @@ SET password_encryption = 'scram-sha-256'; -- ok
@@ -3192,59 +3213,23 @@ index 53e86b0b6c..0303fdfe96 100644
 
 -- check list of created entries
 --
-@@ -42,26 +42,18 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
+@@ -42,14 +42,14 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
 SET password_encryption = 'md5';
 
 -- encrypt with MD5
 -ALTER ROLE regress_passwd2 PASSWORD 'foo';
---- already encrypted, use as they are
--ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
--ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
 +ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- already encrypted, use as they are
+ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
+ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';

 SET password_encryption = 'scram-sha-256';
 -- create SCRAM secret
 -ALTER ROLE regress_passwd4 PASSWORD 'foo';
---- already encrypted with MD5, use as it is
--CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
 +ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+-- Neon does not support encrypted passwords, use unencrypted instead
+-- already encrypted with MD5, use as it is
-+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';

--- This looks like a valid SCRAM-SHA-256 secret, but it is not
--- so it should be hashed with SCRAM-SHA-256.
--CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
--- These may look like valid MD5 secrets, but they are not, so they
--- should be hashed with SCRAM-SHA-256.
--- trailing garbage at the end
--CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
--- invalid length
--CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
-+-- Neon does not support encrypted passwords, use unencrypted instead
-+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;

--- Changing the SCRAM iteration count
-SET scram_iterations = 1024;
-@@ -78,13 +70,10 @@ ALTER ROLE regress_passwd_empty PASSWORD 'md585939a5ce845f1a1b620742e3c659e0a';
-ALTER ROLE regress_passwd_empty PASSWORD 'SCRAM-SHA-256$4096:hpFyHTUsSWcR7O9P$LgZFIt6Oqdo27ZFKbZ2nV+vtnYM995pDh9ca6WSi120=:qVV5NeluNfUPkwm7Vqat25RjSPLkGeoZBQs6wVv+um4=';
-SELECT rolpassword FROM pg_authid WHERE rolname='regress_passwd_empty';

--- Test with invalid stored and server keys.
---
--- The first is valid, to act as a control. The others have too long
--- stored/server keys. They will be re-hashed.
--CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
--CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
--CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
-+-- Neon does not support encrypted passwords, use unencrypted instead
-+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;

--- Check that the invalid secrets were re-hashed. A re-hashed secret
--- should not contain the original salt.
 diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
 index 249df17a58..b258e7f26a 100644
 --- a/src/test/regress/sql/privileges.sql
File diff suppressed because it is too large
@@ -7,7 +7,7 @@ license.workspace = true
 [features]
 default = []
 # Enables test specific features.
-testing = ["fail/failpoints"]
+testing = []
 
 [dependencies]
 base64.workspace = true
@@ -15,15 +15,13 @@ aws-config.workspace = true
 aws-sdk-s3.workspace = true
 aws-sdk-kms.workspace = true
 anyhow.workspace = true
-axum = { workspace = true, features = [] }
 camino.workspace = true
 chrono.workspace = true
 cfg-if.workspace = true
 clap.workspace = true
-fail.workspace = true
 flate2.workspace = true
 futures.workspace = true
-http.workspace = true
+hyper0 = { workspace = true, features = ["full"] }
 metrics.workspace = true
 nix.workspace = true
 notify.workspace = true
@@ -38,8 +36,6 @@ serde_with.workspace = true
 serde_json.workspace = true
 signal-hook.workspace = true
 tar.workspace = true
-tower.workspace = true
-tower-http.workspace = true
 reqwest = { workspace = true, features = ["json"] }
 tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
 tokio-postgres.workspace = true
@@ -60,22 +60,19 @@ use compute_tools::compute::{
 };
 use compute_tools::configurator::launch_configurator;
 use compute_tools::extension_server::get_pg_version_string;
-use compute_tools::http::launch_http_server;
+use compute_tools::http::api::launch_http_server;
 use compute_tools::logger::*;
 use compute_tools::monitor::launch_monitor;
 use compute_tools::params::*;
 use compute_tools::spec::*;
 use compute_tools::swap::resize_swap;
 use rlimit::{setrlimit, Resource};
-use utils::failpoint_support;
 
 // this is an arbitrary build tag. Fine as a default / for testing purposes
 // in-case of not-set environment var
 const BUILD_TAG_DEFAULT: &str = "latest";
 
 fn main() -> Result<()> {
-let scenario = failpoint_support::init();
-
 let (build_tag, clap_args) = init()?;
 
 // enable core dumping for all child processes
@@ -103,8 +100,6 @@ fn main() -> Result<()> {
 
 maybe_delay_exit(delay_exit);
 
-scenario.teardown();
-
 deinit_and_exit(wait_pg_result);
 }
 
@@ -251,48 +246,47 @@ fn try_spec_from_cli(
 let compute_id = matches.get_one::<String>("compute-id");
 let control_plane_uri = matches.get_one::<String>("control-plane-uri");
 
-// First, try to get cluster spec from the cli argument
+let spec;
-if let Some(spec_json) = spec_json {
+let mut live_config_allowed = false;
-info!("got spec from cli argument {}", spec_json);
+match spec_json {
-return Ok(CliSpecParams {
+// First, try to get cluster spec from the cli argument
-spec: Some(serde_json::from_str(spec_json)?),
+Some(json) => {
-live_config_allowed: false,
+info!("got spec from cli argument {}", json);
-});
+spec = Some(serde_json::from_str(json)?);
-}
-
-// Second, try to read it from the file if path is provided
-if let Some(spec_path) = spec_path {
-let file = File::open(Path::new(spec_path))?;
-return Ok(CliSpecParams {
-spec: Some(serde_json::from_reader(file)?),
-live_config_allowed: true,
-});
-}
-
-let Some(compute_id) = compute_id else {
-panic!(
-"compute spec should be provided by one of the following ways: \
---spec OR --spec-path OR --control-plane-uri and --compute-id"
-);
-};
-let Some(control_plane_uri) = control_plane_uri else {
-panic!("must specify both --control-plane-uri and --compute-id or none");
-};
-
-match get_spec_from_control_plane(control_plane_uri, compute_id) {
-Ok(spec) => Ok(CliSpecParams {
-spec,
-live_config_allowed: true,
-}),
-Err(e) => {
-error!(
-"cannot get response from control plane: {}\n\
-neither spec nor confirmation that compute is in the Empty state was received",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
Err(e)
|
|
||||||
}
|
}
|
||||||
}
|
None => {
|
||||||
|
// Second, try to read it from the file if path is provided
|
||||||
|
if let Some(sp) = spec_path {
|
||||||
|
let path = Path::new(sp);
|
||||||
|
let file = File::open(path)?;
|
||||||
|
spec = Some(serde_json::from_reader(file)?);
|
||||||
|
live_config_allowed = true;
|
||||||
|
} else if let Some(id) = compute_id {
|
||||||
|
if let Some(cp_base) = control_plane_uri {
|
||||||
|
live_config_allowed = true;
|
||||||
|
spec = match get_spec_from_control_plane(cp_base, id) {
|
||||||
|
Ok(s) => s,
|
||||||
|
Err(e) => {
|
||||||
|
error!("cannot get response from control plane: {}", e);
|
||||||
|
panic!("neither spec nor confirmation that compute is in the Empty state was received");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
panic!("must specify both --control-plane-uri and --compute-id or none");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
panic!(
|
||||||
|
"compute spec should be provided by one of the following ways: \
|
||||||
|
--spec OR --spec-path OR --control-plane-uri and --compute-id"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(CliSpecParams {
|
||||||
|
spec,
|
||||||
|
live_config_allowed,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
struct CliSpecParams {
|
struct CliSpecParams {
|
||||||
@@ -424,14 +418,9 @@ fn start_postgres(
|
|||||||
"running compute with features: {:?}",
|
"running compute with features: {:?}",
|
||||||
state.pspec.as_ref().unwrap().spec.features
|
state.pspec.as_ref().unwrap().spec.features
|
||||||
);
|
);
|
||||||
// before we release the mutex, fetch some parameters for later.
|
// before we release the mutex, fetch the swap size (if any) for later.
|
||||||
let &ComputeSpec {
|
let swap_size_bytes = state.pspec.as_ref().unwrap().spec.swap_size_bytes;
|
||||||
swap_size_bytes,
|
let disk_quota_bytes = state.pspec.as_ref().unwrap().spec.disk_quota_bytes;
|
||||||
disk_quota_bytes,
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
disable_lfc_resizing,
|
|
||||||
..
|
|
||||||
} = &state.pspec.as_ref().unwrap().spec;
|
|
||||||
drop(state);
|
drop(state);
|
||||||
|
|
||||||
// Launch remaining service threads
|
// Launch remaining service threads
|
||||||
@@ -493,10 +482,7 @@ fn start_postgres(
|
|||||||
let mut pg = None;
|
let mut pg = None;
|
||||||
if !prestartup_failed {
|
if !prestartup_failed {
|
||||||
pg = match compute.start_compute() {
|
pg = match compute.start_compute() {
|
||||||
Ok(pg) => {
|
Ok(pg) => Some(pg),
|
||||||
info!(postmaster_pid = %pg.0.id(), "Postgres was started");
|
|
||||||
Some(pg)
|
|
||||||
}
|
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
error!("could not start the compute node: {:#}", err);
|
error!("could not start the compute node: {:#}", err);
|
||||||
compute.set_failed_status(err);
|
compute.set_failed_status(err);
|
||||||
@@ -539,18 +525,11 @@ fn start_postgres(
|
|||||||
// This token is used internally by the monitor to clean up all threads
|
// This token is used internally by the monitor to clean up all threads
|
||||||
let token = CancellationToken::new();
|
let token = CancellationToken::new();
|
||||||
|
|
||||||
// don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
|
|
||||||
let pgconnstr = if disable_lfc_resizing.unwrap_or(false) {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
file_cache_connstr.cloned()
|
|
||||||
};
|
|
||||||
|
|
||||||
let vm_monitor = rt.as_ref().map(|rt| {
|
let vm_monitor = rt.as_ref().map(|rt| {
|
||||||
rt.spawn(vm_monitor::start(
|
rt.spawn(vm_monitor::start(
|
||||||
Box::leak(Box::new(vm_monitor::Args {
|
Box::leak(Box::new(vm_monitor::Args {
|
||||||
cgroup: cgroup.cloned(),
|
cgroup: cgroup.cloned(),
|
||||||
pgconnstr,
|
pgconnstr: file_cache_connstr.cloned(),
|
||||||
addr: vm_monitor_addr.clone(),
|
addr: vm_monitor_addr.clone(),
|
||||||
})),
|
})),
|
||||||
token.clone(),
|
token.clone(),
|
||||||
@@ -594,8 +573,6 @@ fn wait_postgres(pg: Option<PostgresHandle>) -> Result<WaitPostgresResult> {
|
|||||||
// propagate to Postgres and it will be shut down as well.
|
// propagate to Postgres and it will be shut down as well.
|
||||||
let mut exit_code = None;
|
let mut exit_code = None;
|
||||||
if let Some((mut pg, logs_handle)) = pg {
|
if let Some((mut pg, logs_handle)) = pg {
|
||||||
info!(postmaster_pid = %pg.id(), "Waiting for Postgres to exit");
|
|
||||||
|
|
||||||
let ecode = pg
|
let ecode = pg
|
||||||
.wait()
|
.wait()
|
||||||
.expect("failed to start waiting on Postgres process");
|
.expect("failed to start waiting on Postgres process");
|
||||||
|
|||||||
@@ -34,12 +34,12 @@ use nix::unistd::Pid;
|
|||||||
use tracing::{info, info_span, warn, Instrument};
|
use tracing::{info, info_span, warn, Instrument};
|
||||||
use utils::fs_ext::is_directory_empty;
|
use utils::fs_ext::is_directory_empty;
|
||||||
|
|
||||||
#[path = "fast_import/aws_s3_sync.rs"]
|
|
||||||
mod aws_s3_sync;
|
|
||||||
#[path = "fast_import/child_stdio_to_log.rs"]
|
#[path = "fast_import/child_stdio_to_log.rs"]
|
||||||
mod child_stdio_to_log;
|
mod child_stdio_to_log;
|
||||||
#[path = "fast_import/s3_uri.rs"]
|
#[path = "fast_import/s3_uri.rs"]
|
||||||
mod s3_uri;
|
mod s3_uri;
|
||||||
|
#[path = "fast_import/s5cmd.rs"]
|
||||||
|
mod s5cmd;
|
||||||
|
|
||||||
#[derive(clap::Parser)]
|
#[derive(clap::Parser)]
|
||||||
struct Args {
|
struct Args {
|
||||||
@@ -326,7 +326,7 @@ pub(crate) async fn main() -> anyhow::Result<()> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
info!("upload pgdata");
|
info!("upload pgdata");
|
||||||
aws_s3_sync::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/pgdata/"))
|
s5cmd::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/"))
|
||||||
.await
|
.await
|
||||||
.context("sync dump directory to destination")?;
|
.context("sync dump directory to destination")?;
|
||||||
|
|
||||||
@@ -334,10 +334,10 @@ pub(crate) async fn main() -> anyhow::Result<()> {
|
|||||||
{
|
{
|
||||||
let status_dir = working_directory.join("status");
|
let status_dir = working_directory.join("status");
|
||||||
std::fs::create_dir(&status_dir).context("create status directory")?;
|
std::fs::create_dir(&status_dir).context("create status directory")?;
|
||||||
let status_file = status_dir.join("pgdata");
|
let status_file = status_dir.join("status");
|
||||||
std::fs::write(&status_file, serde_json::json!({"done": true}).to_string())
|
std::fs::write(&status_file, serde_json::json!({"done": true}).to_string())
|
||||||
.context("write status file")?;
|
.context("write status file")?;
|
||||||
aws_s3_sync::sync(&status_dir, &s3_prefix.append("/status/"))
|
s5cmd::sync(&status_file, &s3_prefix.append("/status/pgdata"))
|
||||||
.await
|
.await
|
||||||
.context("sync status directory to destination")?;
|
.context("sync status directory to destination")?;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,21 +4,24 @@ use camino::Utf8Path;
|
|||||||
use super::s3_uri::S3Uri;
|
use super::s3_uri::S3Uri;
|
||||||
|
|
||||||
pub(crate) async fn sync(local: &Utf8Path, remote: &S3Uri) -> anyhow::Result<()> {
|
pub(crate) async fn sync(local: &Utf8Path, remote: &S3Uri) -> anyhow::Result<()> {
|
||||||
let mut builder = tokio::process::Command::new("aws");
|
let mut builder = tokio::process::Command::new("s5cmd");
|
||||||
|
// s5cmd uses aws-sdk-go v1, hence doesn't support AWS_ENDPOINT_URL
|
||||||
|
if let Some(val) = std::env::var_os("AWS_ENDPOINT_URL") {
|
||||||
|
builder.arg("--endpoint-url").arg(val);
|
||||||
|
}
|
||||||
builder
|
builder
|
||||||
.arg("s3")
|
|
||||||
.arg("sync")
|
.arg("sync")
|
||||||
.arg(local.as_str())
|
.arg(local.as_str())
|
||||||
.arg(remote.to_string());
|
.arg(remote.to_string());
|
||||||
let st = builder
|
let st = builder
|
||||||
.spawn()
|
.spawn()
|
||||||
.context("spawn aws s3 sync")?
|
.context("spawn s5cmd")?
|
||||||
.wait()
|
.wait()
|
||||||
.await
|
.await
|
||||||
.context("wait for aws s3 sync")?;
|
.context("wait for s5cmd")?;
|
||||||
if st.success() {
|
if st.success() {
|
||||||
Ok(())
|
Ok(())
|
||||||
} else {
|
} else {
|
||||||
Err(anyhow::anyhow!("aws s3 sync failed"))
|
Err(anyhow::anyhow!("s5cmd failed"))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -36,11 +36,11 @@ pub async fn get_dbs_and_roles(compute: &Arc<ComputeNode>) -> anyhow::Result<Cat
|
|||||||
|
|
||||||
#[derive(Debug, thiserror::Error)]
|
#[derive(Debug, thiserror::Error)]
|
||||||
pub enum SchemaDumpError {
|
pub enum SchemaDumpError {
|
||||||
#[error("database does not exist")]
|
#[error("Database does not exist.")]
|
||||||
DatabaseDoesNotExist,
|
DatabaseDoesNotExist,
|
||||||
#[error("failed to execute pg_dump")]
|
#[error("Failed to execute pg_dump.")]
|
||||||
IO(#[from] std::io::Error),
|
IO(#[from] std::io::Error),
|
||||||
#[error("unexpected I/O error")]
|
#[error("Unexpected error.")]
|
||||||
Unexpected,
|
Unexpected,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ use std::time::Instant;
|
|||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
use chrono::{DateTime, Utc};
|
use chrono::{DateTime, Utc};
|
||||||
use compute_api::spec::{Database, PgIdent, Role};
|
use compute_api::spec::{PgIdent, Role};
|
||||||
use futures::future::join_all;
|
use futures::future::join_all;
|
||||||
use futures::stream::FuturesUnordered;
|
use futures::stream::FuturesUnordered;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
@@ -45,10 +45,8 @@ use crate::spec_apply::ApplySpecPhase::{
|
|||||||
DropInvalidDatabases, DropRoles, HandleNeonExtension, HandleOtherExtensions,
|
DropInvalidDatabases, DropRoles, HandleNeonExtension, HandleOtherExtensions,
|
||||||
RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
|
RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
|
||||||
};
|
};
|
||||||
use crate::spec_apply::PerDatabasePhase;
|
|
||||||
use crate::spec_apply::PerDatabasePhase::{
|
use crate::spec_apply::PerDatabasePhase::{
|
||||||
ChangeSchemaPerms, DeleteDBRoleReferences, DropSubscriptionsForDeletedDatabases,
|
ChangeSchemaPerms, DeleteDBRoleReferences, HandleAnonExtension,
|
||||||
HandleAnonExtension,
|
|
||||||
};
|
};
|
||||||
use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
|
use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
|
||||||
use crate::sync_sk::{check_if_synced, ping_safekeeper};
|
use crate::sync_sk::{check_if_synced, ping_safekeeper};
|
||||||
@@ -836,7 +834,7 @@ impl ComputeNode {
|
|||||||
conf
|
conf
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_maintenance_client(
|
async fn get_maintenance_client(
|
||||||
conf: &tokio_postgres::Config,
|
conf: &tokio_postgres::Config,
|
||||||
) -> Result<tokio_postgres::Client> {
|
) -> Result<tokio_postgres::Client> {
|
||||||
let mut conf = conf.clone();
|
let mut conf = conf.clone();
|
||||||
@@ -945,78 +943,6 @@ impl ComputeNode {
|
|||||||
dbs: databases,
|
dbs: databases,
|
||||||
}));
|
}));
|
||||||
|
|
||||||
// Apply special pre drop database phase.
|
|
||||||
// NOTE: we use the code of RunInEachDatabase phase for parallelism
|
|
||||||
// and connection management, but we don't really run it in *each* database,
|
|
||||||
// only in databases, we're about to drop.
|
|
||||||
info!("Applying PerDatabase (pre-dropdb) phase");
|
|
||||||
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
|
|
||||||
|
|
||||||
// Run the phase for each database that we're about to drop.
|
|
||||||
let db_processes = spec
|
|
||||||
.delta_operations
|
|
||||||
.iter()
|
|
||||||
.flatten()
|
|
||||||
.filter_map(move |op| {
|
|
||||||
if op.action.as_str() == "delete_db" {
|
|
||||||
Some(op.name.clone())
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.map(|dbname| {
|
|
||||||
let spec = spec.clone();
|
|
||||||
let ctx = ctx.clone();
|
|
||||||
let jwks_roles = jwks_roles.clone();
|
|
||||||
let mut conf = conf.as_ref().clone();
|
|
||||||
let concurrency_token = concurrency_token.clone();
|
|
||||||
// We only need dbname field for this phase, so set other fields to dummy values
|
|
||||||
let db = DB::UserDB(Database {
|
|
||||||
name: dbname.clone(),
|
|
||||||
owner: "cloud_admin".to_string(),
|
|
||||||
options: None,
|
|
||||||
restrict_conn: false,
|
|
||||||
invalid: false,
|
|
||||||
});
|
|
||||||
|
|
||||||
debug!("Applying per-database phases for Database {:?}", &db);
|
|
||||||
|
|
||||||
match &db {
|
|
||||||
DB::SystemDB => {}
|
|
||||||
DB::UserDB(db) => {
|
|
||||||
conf.dbname(db.name.as_str());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let conf = Arc::new(conf);
|
|
||||||
let fut = Self::apply_spec_sql_db(
|
|
||||||
spec.clone(),
|
|
||||||
conf,
|
|
||||||
ctx.clone(),
|
|
||||||
jwks_roles.clone(),
|
|
||||||
concurrency_token.clone(),
|
|
||||||
db,
|
|
||||||
[DropSubscriptionsForDeletedDatabases].to_vec(),
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(spawn(fut))
|
|
||||||
})
|
|
||||||
.collect::<Vec<Result<_, anyhow::Error>>>();
|
|
||||||
|
|
||||||
for process in db_processes.into_iter() {
|
|
||||||
let handle = process?;
|
|
||||||
if let Err(e) = handle.await? {
|
|
||||||
// Handle the error case where the database does not exist
|
|
||||||
// We do not check whether the DB exists or not in the deletion phase,
|
|
||||||
// so we shouldn't be strict about it in pre-deletion cleanup as well.
|
|
||||||
if e.to_string().contains("does not exist") {
|
|
||||||
warn!("Error dropping subscription: {}", e);
|
|
||||||
} else {
|
|
||||||
return Err(e);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
for phase in [
|
for phase in [
|
||||||
CreateSuperUser,
|
CreateSuperUser,
|
||||||
DropInvalidDatabases,
|
DropInvalidDatabases,
|
||||||
@@ -1036,7 +962,7 @@ impl ComputeNode {
|
|||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("Applying RunInEachDatabase2 phase");
|
info!("Applying RunInEachDatabase phase");
|
||||||
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
|
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
|
||||||
|
|
||||||
let db_processes = spec
|
let db_processes = spec
|
||||||
@@ -1071,12 +997,6 @@ impl ComputeNode {
|
|||||||
jwks_roles.clone(),
|
jwks_roles.clone(),
|
||||||
concurrency_token.clone(),
|
concurrency_token.clone(),
|
||||||
db,
|
db,
|
||||||
[
|
|
||||||
DeleteDBRoleReferences,
|
|
||||||
ChangeSchemaPerms,
|
|
||||||
HandleAnonExtension,
|
|
||||||
]
|
|
||||||
.to_vec(),
|
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(spawn(fut))
|
Ok(spawn(fut))
|
||||||
@@ -1123,13 +1043,16 @@ impl ComputeNode {
|
|||||||
jwks_roles: Arc<HashSet<String>>,
|
jwks_roles: Arc<HashSet<String>>,
|
||||||
concurrency_token: Arc<tokio::sync::Semaphore>,
|
concurrency_token: Arc<tokio::sync::Semaphore>,
|
||||||
db: DB,
|
db: DB,
|
||||||
subphases: Vec<PerDatabasePhase>,
|
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let _permit = concurrency_token.acquire().await?;
|
let _permit = concurrency_token.acquire().await?;
|
||||||
|
|
||||||
let mut client_conn = None;
|
let mut client_conn = None;
|
||||||
|
|
||||||
for subphase in subphases {
|
for subphase in [
|
||||||
|
DeleteDBRoleReferences,
|
||||||
|
ChangeSchemaPerms,
|
||||||
|
HandleAnonExtension,
|
||||||
|
] {
|
||||||
apply_operations(
|
apply_operations(
|
||||||
spec.clone(),
|
spec.clone(),
|
||||||
ctx.clone(),
|
ctx.clone(),
|
||||||
@@ -1258,19 +1181,8 @@ impl ComputeNode {
|
|||||||
let mut conf = postgres::config::Config::from(conf);
|
let mut conf = postgres::config::Config::from(conf);
|
||||||
conf.application_name("compute_ctl:migrations");
|
conf.application_name("compute_ctl:migrations");
|
||||||
|
|
||||||
match conf.connect(NoTls) {
|
let mut client = conf.connect(NoTls)?;
|
||||||
Ok(mut client) => {
|
handle_migrations(&mut client).context("apply_config handle_migrations")
|
||||||
if let Err(e) = handle_migrations(&mut client) {
|
|
||||||
error!("Failed to run migrations: {}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
error!(
|
|
||||||
"Failed to connect to the compute for running migrations: {}",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
});
|
});
|
||||||
|
|
||||||
Ok::<(), anyhow::Error>(())
|
Ok::<(), anyhow::Error>(())
|
||||||
|
|||||||
591
compute_tools/src/http/api.rs
Normal file
591
compute_tools/src/http/api.rs
Normal file
@@ -0,0 +1,591 @@
|
|||||||
|
use std::convert::Infallible;
|
||||||
|
use std::net::IpAddr;
|
||||||
|
use std::net::Ipv6Addr;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::thread;
|
||||||
|
|
||||||
|
use crate::catalog::SchemaDumpError;
|
||||||
|
use crate::catalog::{get_database_schema, get_dbs_and_roles};
|
||||||
|
use crate::compute::forward_termination_signal;
|
||||||
|
use crate::compute::{ComputeNode, ComputeState, ParsedSpec};
|
||||||
|
use crate::installed_extensions;
|
||||||
|
use compute_api::requests::{ConfigurationRequest, ExtensionInstallRequest, SetRoleGrantsRequest};
|
||||||
|
use compute_api::responses::{
|
||||||
|
ComputeStatus, ComputeStatusResponse, ExtensionInstallResult, GenericAPIError,
|
||||||
|
SetRoleGrantsResponse,
|
||||||
|
};
|
||||||
|
|
||||||
|
use anyhow::Result;
|
||||||
|
use hyper::header::CONTENT_TYPE;
|
||||||
|
use hyper::service::{make_service_fn, service_fn};
|
||||||
|
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
||||||
|
use metrics::proto::MetricFamily;
|
||||||
|
use metrics::Encoder;
|
||||||
|
use metrics::TextEncoder;
|
||||||
|
use tokio::task;
|
||||||
|
use tracing::{debug, error, info, warn};
|
||||||
|
use tracing_utils::http::OtelName;
|
||||||
|
use utils::http::request::must_get_query_param;
|
||||||
|
|
||||||
|
fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse {
|
||||||
|
ComputeStatusResponse {
|
||||||
|
start_time: state.start_time,
|
||||||
|
tenant: state
|
||||||
|
.pspec
|
||||||
|
.as_ref()
|
||||||
|
.map(|pspec| pspec.tenant_id.to_string()),
|
||||||
|
timeline: state
|
||||||
|
.pspec
|
||||||
|
.as_ref()
|
||||||
|
.map(|pspec| pspec.timeline_id.to_string()),
|
||||||
|
status: state.status,
|
||||||
|
last_active: state.last_active,
|
||||||
|
error: state.error.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Service function to handle all available routes.
|
||||||
|
async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
|
||||||
|
//
|
||||||
|
// NOTE: The URI path is currently included in traces. That's OK because
|
||||||
|
// it doesn't contain any variable parts or sensitive information. But
|
||||||
|
// please keep that in mind if you change the routing here.
|
||||||
|
//
|
||||||
|
match (req.method(), req.uri().path()) {
|
||||||
|
// Serialized compute state.
|
||||||
|
(&Method::GET, "/status") => {
|
||||||
|
debug!("serving /status GET request");
|
||||||
|
let state = compute.state.lock().unwrap();
|
||||||
|
let status_response = status_response_from_state(&state);
|
||||||
|
Response::new(Body::from(serde_json::to_string(&status_response).unwrap()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Startup metrics in JSON format. Keep /metrics reserved for a possible
|
||||||
|
// future use for Prometheus metrics format.
|
||||||
|
(&Method::GET, "/metrics.json") => {
|
||||||
|
info!("serving /metrics.json GET request");
|
||||||
|
let metrics = compute.state.lock().unwrap().metrics.clone();
|
||||||
|
Response::new(Body::from(serde_json::to_string(&metrics).unwrap()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prometheus metrics
|
||||||
|
(&Method::GET, "/metrics") => {
|
||||||
|
debug!("serving /metrics GET request");
|
||||||
|
|
||||||
|
// When we call TextEncoder::encode() below, it will immediately
|
||||||
|
// return an error if a metric family has no metrics, so we need to
|
||||||
|
// preemptively filter out metric families with no metrics.
|
||||||
|
let metrics = installed_extensions::collect()
|
||||||
|
.into_iter()
|
||||||
|
.filter(|m| !m.get_metric().is_empty())
|
||||||
|
.collect::<Vec<MetricFamily>>();
|
||||||
|
|
||||||
|
let encoder = TextEncoder::new();
|
||||||
|
let mut buffer = vec![];
|
||||||
|
|
||||||
|
if let Err(err) = encoder.encode(&metrics, &mut buffer) {
|
||||||
|
let msg = format!("error handling /metrics request: {err}");
|
||||||
|
error!(msg);
|
||||||
|
return render_json_error(&msg, StatusCode::INTERNAL_SERVER_ERROR);
|
||||||
|
}
|
||||||
|
|
||||||
|
match Response::builder()
|
||||||
|
.status(StatusCode::OK)
|
||||||
|
.header(CONTENT_TYPE, encoder.format_type())
|
||||||
|
.body(Body::from(buffer))
|
||||||
|
{
|
||||||
|
Ok(response) => response,
|
||||||
|
Err(err) => {
|
||||||
|
let msg = format!("error handling /metrics request: {err}");
|
||||||
|
error!(msg);
|
||||||
|
render_json_error(&msg, StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Collect Postgres current usage insights
|
||||||
|
(&Method::GET, "/insights") => {
|
||||||
|
info!("serving /insights GET request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!("compute is not running, current status: {:?}", status);
|
||||||
|
error!(msg);
|
||||||
|
return Response::new(Body::from(msg));
|
||||||
|
}
|
||||||
|
|
||||||
|
let insights = compute.collect_insights().await;
|
||||||
|
Response::new(Body::from(insights))
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::POST, "/check_writability") => {
|
||||||
|
info!("serving /check_writability POST request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for check_writability request: {:?}",
|
||||||
|
status
|
||||||
|
);
|
||||||
|
error!(msg);
|
||||||
|
return Response::new(Body::from(msg));
|
||||||
|
}
|
||||||
|
|
||||||
|
let res = crate::checker::check_writability(compute).await;
|
||||||
|
match res {
|
||||||
|
Ok(_) => Response::new(Body::from("true")),
|
||||||
|
Err(e) => {
|
||||||
|
error!("check_writability failed: {}", e);
|
||||||
|
Response::new(Body::from(e.to_string()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::POST, "/extensions") => {
|
||||||
|
info!("serving /extensions POST request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for extensions request: {:?}",
|
||||||
|
status
|
||||||
|
);
|
||||||
|
error!(msg);
|
||||||
|
return render_json_error(&msg, StatusCode::PRECONDITION_FAILED);
|
||||||
|
}
|
||||||
|
|
||||||
|
let request = hyper::body::to_bytes(req.into_body()).await.unwrap();
|
||||||
|
let request = serde_json::from_slice::<ExtensionInstallRequest>(&request).unwrap();
|
||||||
|
let res = compute
|
||||||
|
.install_extension(&request.extension, &request.database, request.version)
|
||||||
|
.await;
|
||||||
|
match res {
|
||||||
|
Ok(version) => render_json(Body::from(
|
||||||
|
serde_json::to_string(&ExtensionInstallResult {
|
||||||
|
extension: request.extension,
|
||||||
|
version,
|
||||||
|
})
|
||||||
|
.unwrap(),
|
||||||
|
)),
|
||||||
|
Err(e) => {
|
||||||
|
error!("install_extension failed: {}", e);
|
||||||
|
render_json_error(&e.to_string(), StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::GET, "/info") => {
|
||||||
|
let num_cpus = num_cpus::get_physical();
|
||||||
|
info!("serving /info GET request. num_cpus: {}", num_cpus);
|
||||||
|
Response::new(Body::from(
|
||||||
|
serde_json::json!({
|
||||||
|
"num_cpus": num_cpus,
|
||||||
|
})
|
||||||
|
.to_string(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accept spec in JSON format and request compute configuration. If
|
||||||
|
// anything goes wrong after we set the compute status to `ConfigurationPending`
|
||||||
|
// and update compute state with new spec, we basically leave compute
|
||||||
|
// in the potentially wrong state. That said, it's control-plane's
|
||||||
|
// responsibility to watch compute state after reconfiguration request
|
||||||
|
// and to clean restart in case of errors.
|
||||||
|
(&Method::POST, "/configure") => {
|
||||||
|
info!("serving /configure POST request");
|
||||||
|
match handle_configure_request(req, compute).await {
|
||||||
|
Ok(msg) => Response::new(Body::from(msg)),
|
||||||
|
Err((msg, code)) => {
|
||||||
|
error!("error handling /configure request: {msg}");
|
||||||
|
render_json_error(&msg, code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::POST, "/terminate") => {
|
||||||
|
info!("serving /terminate POST request");
|
||||||
|
match handle_terminate_request(compute).await {
|
||||||
|
Ok(()) => Response::new(Body::empty()),
|
||||||
|
Err((msg, code)) => {
|
||||||
|
error!("error handling /terminate request: {msg}");
|
||||||
|
render_json_error(&msg, code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::GET, "/dbs_and_roles") => {
|
||||||
|
info!("serving /dbs_and_roles GET request",);
|
||||||
|
match get_dbs_and_roles(compute).await {
|
||||||
|
Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())),
|
||||||
|
Err(_) => {
|
||||||
|
render_json_error("can't get dbs and roles", StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::GET, "/database_schema") => {
|
||||||
|
let database = match must_get_query_param(&req, "database") {
|
||||||
|
Err(e) => return e.into_response(),
|
||||||
|
Ok(database) => database,
|
||||||
|
};
|
||||||
|
info!("serving /database_schema GET request with database: {database}",);
|
||||||
|
match get_database_schema(compute, &database).await {
|
||||||
|
Ok(res) => render_plain(Body::wrap_stream(res)),
|
||||||
|
Err(SchemaDumpError::DatabaseDoesNotExist) => {
|
||||||
|
render_json_error("database does not exist", StatusCode::NOT_FOUND)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("can't get schema dump: {}", e);
|
||||||
|
render_json_error("can't get schema dump", StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::POST, "/grants") => {
|
||||||
|
info!("serving /grants POST request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for set_role_grants request: {:?}",
|
||||||
|
status
|
||||||
|
);
|
||||||
|
error!(msg);
|
||||||
|
return render_json_error(&msg, StatusCode::PRECONDITION_FAILED);
|
||||||
|
}
|
||||||
|
|
||||||
|
let request = hyper::body::to_bytes(req.into_body()).await.unwrap();
|
||||||
|
let request = serde_json::from_slice::<SetRoleGrantsRequest>(&request).unwrap();
|
||||||
|
|
||||||
|
let res = compute
|
||||||
|
.set_role_grants(
|
||||||
|
&request.database,
|
||||||
|
&request.schema,
|
||||||
|
&request.privileges,
|
||||||
|
&request.role,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
match res {
|
||||||
|
Ok(()) => render_json(Body::from(
|
||||||
|
serde_json::to_string(&SetRoleGrantsResponse {
|
||||||
|
database: request.database,
|
||||||
|
schema: request.schema,
|
||||||
|
role: request.role,
|
||||||
|
privileges: request.privileges,
|
||||||
|
})
|
||||||
|
.unwrap(),
|
||||||
|
)),
|
||||||
|
Err(e) => render_json_error(
|
||||||
|
&format!("could not grant role privileges to the schema: {e}"),
|
||||||
|
// TODO: can we filter on role/schema not found errors
|
||||||
|
// and return appropriate error code?
|
||||||
|
StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// get the list of installed extensions
|
||||||
|
// currently only used in python tests
|
||||||
|
// TODO: call it from cplane
|
||||||
|
(&Method::GET, "/installed_extensions") => {
|
||||||
|
info!("serving /installed_extensions GET request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for extensions request: {:?}",
|
||||||
|
status
|
||||||
|
);
|
||||||
|
error!(msg);
|
||||||
|
return Response::new(Body::from(msg));
|
||||||
|
}
|
||||||
|
|
||||||
|
let conf = compute.get_conn_conf(None);
|
||||||
|
let res =
|
||||||
|
task::spawn_blocking(move || installed_extensions::get_installed_extensions(conf))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
match res {
|
||||||
|
Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())),
|
||||||
|
Err(e) => render_json_error(
|
||||||
|
&format!("could not get list of installed extensions: {}", e),
|
||||||
|
StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// download extension files from remote extension storage on demand
|
||||||
|
(&Method::POST, route) if route.starts_with("/extension_server/") => {
|
||||||
|
info!("serving {:?} POST request", route);
|
||||||
|
info!("req.uri {:?}", req.uri());
|
||||||
|
|
||||||
|
// don't even try to download extensions
|
||||||
|
// if no remote storage is configured
|
||||||
|
if compute.ext_remote_storage.is_none() {
|
||||||
|
info!("no extensions remote storage configured");
|
||||||
|
let mut resp = Response::new(Body::from("no remote storage configured"));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut is_library = false;
|
||||||
|
if let Some(params) = req.uri().query() {
|
||||||
|
info!("serving {:?} POST request with params: {}", route, params);
|
||||||
|
if params == "is_library=true" {
|
||||||
|
is_library = true;
|
||||||
|
} else {
|
||||||
|
let mut resp = Response::new(Body::from("Wrong request parameters"));
|
||||||
|
*resp.status_mut() = StatusCode::BAD_REQUEST;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let filename = route.split('/').last().unwrap().to_string();
|
||||||
|
info!("serving /extension_server POST request, filename: {filename:?} is_library: {is_library}");
|
||||||
|
|
||||||
|
// get ext_name and path from spec
|
||||||
|
// don't lock compute_state for too long
|
||||||
|
let ext = {
|
||||||
|
let compute_state = compute.state.lock().unwrap();
|
||||||
|
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
|
||||||
|
let spec = &pspec.spec;
|
||||||
|
|
||||||
|
// debug only
|
||||||
|
info!("spec: {:?}", spec);
|
||||||
|
|
||||||
|
let remote_extensions = match spec.remote_extensions.as_ref() {
|
||||||
|
Some(r) => r,
|
||||||
|
None => {
|
||||||
|
info!("no remote extensions spec was provided");
|
||||||
|
let mut resp = Response::new(Body::from("no remote storage configured"));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
remote_extensions.get_ext(
|
||||||
|
&filename,
|
||||||
|
is_library,
|
||||||
|
&compute.build_tag,
|
||||||
|
&compute.pgversion,
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
match ext {
|
||||||
|
Ok((ext_name, ext_path)) => {
|
||||||
|
match compute.download_extension(ext_name, ext_path).await {
|
||||||
|
Ok(_) => Response::new(Body::from("OK")),
|
||||||
|
Err(e) => {
|
||||||
|
error!("extension download failed: {}", e);
|
||||||
|
let mut resp = Response::new(Body::from(e.to_string()));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("extension download failed to find extension: {}", e);
|
||||||
|
let mut resp = Response::new(Body::from("failed to find file"));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the `404 Not Found` for any other routes.
|
||||||
|
_ => {
|
||||||
|
let mut not_found = Response::new(Body::from("404 Not Found"));
|
||||||
|
*not_found.status_mut() = StatusCode::NOT_FOUND;
|
||||||
|
not_found
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_configure_request(
|
||||||
|
req: Request<Body>,
|
||||||
|
compute: &Arc<ComputeNode>,
|
||||||
|
) -> Result<String, (String, StatusCode)> {
|
||||||
|
if !compute.live_config_allowed {
|
||||||
|
return Err((
|
||||||
|
"live configuration is not allowed for this compute node".to_string(),
|
||||||
|
StatusCode::PRECONDITION_FAILED,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let body_bytes = hyper::body::to_bytes(req.into_body()).await.unwrap();
|
||||||
|
let spec_raw = String::from_utf8(body_bytes.to_vec()).unwrap();
|
||||||
|
if let Ok(request) = serde_json::from_str::<ConfigurationRequest>(&spec_raw) {
|
||||||
|
let spec = request.spec;
|
||||||
|
|
||||||
|
let parsed_spec = match ParsedSpec::try_from(spec) {
|
||||||
|
Ok(ps) => ps,
|
||||||
|
Err(msg) => return Err((msg, StatusCode::BAD_REQUEST)),
|
||||||
|
};
|
||||||
|
|
||||||
|
// XXX: wrap state update under lock in code blocks. Otherwise,
|
||||||
|
// we will try to `Send` `mut state` into the spawned thread
|
||||||
|
// bellow, which will cause error:
|
||||||
|
// ```
|
||||||
|
// error: future cannot be sent between threads safely
|
||||||
|
// ```
|
||||||
|
{
|
||||||
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
if state.status != ComputeStatus::Empty && state.status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for configuration request: {:?}",
|
||||||
|
state.status.clone()
|
||||||
|
);
|
||||||
|
return Err((msg, StatusCode::PRECONDITION_FAILED));
|
||||||
|
}
|
||||||
|
state.pspec = Some(parsed_spec);
|
||||||
|
state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed);
|
||||||
|
drop(state);
|
||||||
|
info!("set new spec and notified waiters");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spawn a blocking thread to wait for compute to become Running.
|
||||||
|
// This is needed to do not block the main pool of workers and
|
||||||
|
// be able to serve other requests while some particular request
|
||||||
|
// is waiting for compute to finish configuration.
|
||||||
|
let c = compute.clone();
|
||||||
|
task::spawn_blocking(move || {
|
||||||
|
let mut state = c.state.lock().unwrap();
|
||||||
|
while state.status != ComputeStatus::Running {
|
||||||
|
state = c.state_changed.wait(state).unwrap();
|
||||||
|
info!(
|
||||||
|
"waiting for compute to become Running, current status: {:?}",
|
||||||
|
state.status
|
||||||
|
);
|
||||||
|
|
||||||
|
if state.status == ComputeStatus::Failed {
|
||||||
|
let err = state.error.as_ref().map_or("unknown error", |x| x);
|
||||||
|
let msg = format!("compute configuration failed: {:?}", err);
|
||||||
|
return Err((msg, StatusCode::INTERNAL_SERVER_ERROR));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap()?;
|
||||||
|
|
||||||
|
// Return current compute state if everything went well.
|
||||||
|
let state = compute.state.lock().unwrap().clone();
|
||||||
|
let status_response = status_response_from_state(&state);
|
||||||
|
Ok(serde_json::to_string(&status_response).unwrap())
|
||||||
|
} else {
|
||||||
|
Err(("invalid spec".to_string(), StatusCode::BAD_REQUEST))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn render_json_error(e: &str, status: StatusCode) -> Response<Body> {
|
||||||
|
let error = GenericAPIError {
|
||||||
|
error: e.to_string(),
|
||||||
|
};
|
||||||
|
Response::builder()
|
||||||
|
.status(status)
|
||||||
|
.header(CONTENT_TYPE, "application/json")
|
||||||
|
.body(Body::from(serde_json::to_string(&error).unwrap()))
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn render_json(body: Body) -> Response<Body> {
|
||||||
|
Response::builder()
|
||||||
|
.header(CONTENT_TYPE, "application/json")
|
||||||
|
.body(body)
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn render_plain(body: Body) -> Response<Body> {
|
||||||
|
Response::builder()
|
||||||
|
.header(CONTENT_TYPE, "text/plain")
|
||||||
|
.body(body)
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_terminate_request(compute: &Arc<ComputeNode>) -> Result<(), (String, StatusCode)> {
|
||||||
|
{
|
||||||
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
if state.status == ComputeStatus::Terminated {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
if state.status != ComputeStatus::Empty && state.status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for termination request: {}",
|
||||||
|
state.status
|
||||||
|
);
|
||||||
|
return Err((msg, StatusCode::PRECONDITION_FAILED));
|
||||||
|
}
|
||||||
|
state.set_status(ComputeStatus::TerminationPending, &compute.state_changed);
|
||||||
|
drop(state);
|
||||||
|
}
|
||||||
|
|
||||||
|
forward_termination_signal();
|
||||||
|
info!("sent signal and notified waiters");
|
||||||
|
|
||||||
|
// Spawn a blocking thread to wait for compute to become Terminated.
|
||||||
|
// This is needed to do not block the main pool of workers and
|
||||||
|
// be able to serve other requests while some particular request
|
||||||
|
// is waiting for compute to finish configuration.
|
||||||
|
let c = compute.clone();
|
||||||
|
task::spawn_blocking(move || {
|
||||||
|
let mut state = c.state.lock().unwrap();
|
||||||
|
while state.status != ComputeStatus::Terminated {
|
||||||
|
state = c.state_changed.wait(state).unwrap();
|
||||||
|
info!(
|
||||||
|
"waiting for compute to become {}, current status: {:?}",
|
||||||
|
ComputeStatus::Terminated,
|
||||||
|
state.status
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap()?;
|
||||||
|
info!("terminated Postgres");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Main Hyper HTTP server function that runs it and blocks waiting on it forever.
|
||||||
|
#[tokio::main]
|
||||||
|
async fn serve(port: u16, state: Arc<ComputeNode>) {
|
||||||
|
// this usually binds to both IPv4 and IPv6 on linux
|
||||||
|
// see e.g. https://github.com/rust-lang/rust/pull/34440
|
||||||
|
let addr = SocketAddr::new(IpAddr::from(Ipv6Addr::UNSPECIFIED), port);
|
||||||
|
|
||||||
|
let make_service = make_service_fn(move |_conn| {
|
||||||
|
let state = state.clone();
|
||||||
|
async move {
|
||||||
|
Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
|
||||||
|
let state = state.clone();
|
||||||
|
async move {
|
||||||
|
Ok::<_, Infallible>(
|
||||||
|
// NOTE: We include the URI path in the string. It
|
||||||
|
// doesn't contain any variable parts or sensitive
|
||||||
|
// information in this API.
|
||||||
|
tracing_utils::http::tracing_handler(
|
||||||
|
req,
|
||||||
|
|req| routes(req, &state),
|
||||||
|
OtelName::UriPath,
|
||||||
|
)
|
||||||
|
.await,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
info!("starting HTTP server on {}", addr);
|
||||||
|
|
||||||
|
let server = Server::bind(&addr).serve(make_service);
|
||||||
|
|
||||||
|
// Run this server forever
|
||||||
|
if let Err(e) = server.await {
|
||||||
|
error!("server error: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Launch a separate Hyper HTTP API server thread and return its `JoinHandle`.
|
||||||
|
pub fn launch_http_server(port: u16, state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
|
||||||
|
let state = Arc::clone(state);
|
||||||
|
|
||||||
|
Ok(thread::Builder::new()
|
||||||
|
.name("http-endpoint".into())
|
||||||
|
.spawn(move || serve(port, state))?)
|
||||||
|
}
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
use std::ops::{Deref, DerefMut};
|
|
||||||
|
|
||||||
use axum::{
|
|
||||||
async_trait,
|
|
||||||
extract::{rejection::JsonRejection, FromRequest, Request},
|
|
||||||
};
|
|
||||||
use compute_api::responses::GenericAPIError;
|
|
||||||
use http::StatusCode;
|
|
||||||
|
|
||||||
/// Custom `Json` extractor, so that we can format errors into
|
|
||||||
/// `JsonResponse<GenericAPIError>`.
|
|
||||||
#[derive(Debug, Clone, Copy, Default)]
|
|
||||||
pub(crate) struct Json<T>(pub T);
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<S, T> FromRequest<S> for Json<T>
|
|
||||||
where
|
|
||||||
axum::Json<T>: FromRequest<S, Rejection = JsonRejection>,
|
|
||||||
S: Send + Sync,
|
|
||||||
{
|
|
||||||
type Rejection = (StatusCode, axum::Json<GenericAPIError>);
|
|
||||||
|
|
||||||
async fn from_request(req: Request, state: &S) -> Result<Self, Self::Rejection> {
|
|
||||||
match axum::Json::<T>::from_request(req, state).await {
|
|
||||||
Ok(value) => Ok(Self(value.0)),
|
|
||||||
Err(rejection) => Err((
|
|
||||||
rejection.status(),
|
|
||||||
axum::Json(GenericAPIError {
|
|
||||||
error: rejection.body_text().to_lowercase(),
|
|
||||||
}),
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Deref for Json<T> {
|
|
||||||
type Target = T;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> DerefMut for Json<T> {
|
|
||||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
|
||||||
&mut self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
pub(crate) mod json;
|
|
||||||
pub(crate) mod path;
|
|
||||||
pub(crate) mod query;
|
|
||||||
|
|
||||||
pub(crate) use json::Json;
|
|
||||||
pub(crate) use path::Path;
|
|
||||||
pub(crate) use query::Query;
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
use std::ops::{Deref, DerefMut};
|
|
||||||
|
|
||||||
use axum::{
|
|
||||||
async_trait,
|
|
||||||
extract::{rejection::PathRejection, FromRequestParts},
|
|
||||||
};
|
|
||||||
use compute_api::responses::GenericAPIError;
|
|
||||||
use http::{request::Parts, StatusCode};
|
|
||||||
|
|
||||||
/// Custom `Path` extractor, so that we can format errors into
|
|
||||||
/// `JsonResponse<GenericAPIError>`.
|
|
||||||
#[derive(Debug, Clone, Copy, Default)]
|
|
||||||
pub(crate) struct Path<T>(pub T);
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<S, T> FromRequestParts<S> for Path<T>
|
|
||||||
where
|
|
||||||
axum::extract::Path<T>: FromRequestParts<S, Rejection = PathRejection>,
|
|
||||||
S: Send + Sync,
|
|
||||||
{
|
|
||||||
type Rejection = (StatusCode, axum::Json<GenericAPIError>);
|
|
||||||
|
|
||||||
async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
|
|
||||||
match axum::extract::Path::<T>::from_request_parts(parts, state).await {
|
|
||||||
Ok(value) => Ok(Self(value.0)),
|
|
||||||
Err(rejection) => Err((
|
|
||||||
rejection.status(),
|
|
||||||
axum::Json(GenericAPIError {
|
|
||||||
error: rejection.body_text().to_ascii_lowercase(),
|
|
||||||
}),
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Deref for Path<T> {
|
|
||||||
type Target = T;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> DerefMut for Path<T> {
|
|
||||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
|
||||||
&mut self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
use std::ops::{Deref, DerefMut};
|
|
||||||
|
|
||||||
use axum::{
|
|
||||||
async_trait,
|
|
||||||
extract::{rejection::QueryRejection, FromRequestParts},
|
|
||||||
};
|
|
||||||
use compute_api::responses::GenericAPIError;
|
|
||||||
use http::{request::Parts, StatusCode};
|
|
||||||
|
|
||||||
/// Custom `Query` extractor, so that we can format errors into
|
|
||||||
/// `JsonResponse<GenericAPIError>`.
|
|
||||||
#[derive(Debug, Clone, Copy, Default)]
|
|
||||||
pub(crate) struct Query<T>(pub T);
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<S, T> FromRequestParts<S> for Query<T>
|
|
||||||
where
|
|
||||||
axum::extract::Query<T>: FromRequestParts<S, Rejection = QueryRejection>,
|
|
||||||
S: Send + Sync,
|
|
||||||
{
|
|
||||||
type Rejection = (StatusCode, axum::Json<GenericAPIError>);
|
|
||||||
|
|
||||||
async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
|
|
||||||
match axum::extract::Query::<T>::from_request_parts(parts, state).await {
|
|
||||||
Ok(value) => Ok(Self(value.0)),
|
|
||||||
Err(rejection) => Err((
|
|
||||||
rejection.status(),
|
|
||||||
axum::Json(GenericAPIError {
|
|
||||||
error: rejection.body_text().to_ascii_lowercase(),
|
|
||||||
}),
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Deref for Query<T> {
|
|
||||||
type Target = T;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> DerefMut for Query<T> {
|
|
||||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
|
||||||
&mut self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,56 +1 @@
|
|||||||
use axum::{body::Body, response::Response};
|
pub mod api;
|
||||||
use compute_api::responses::{ComputeStatus, GenericAPIError};
|
|
||||||
use http::{header::CONTENT_TYPE, StatusCode};
|
|
||||||
use serde::Serialize;
|
|
||||||
use tracing::error;
|
|
||||||
|
|
||||||
pub use server::launch_http_server;
|
|
||||||
|
|
||||||
mod extract;
|
|
||||||
mod routes;
|
|
||||||
mod server;
|
|
||||||
|
|
||||||
/// Convenience response builder for JSON responses
|
|
||||||
struct JsonResponse;
|
|
||||||
|
|
||||||
impl JsonResponse {
|
|
||||||
/// Helper for actually creating a response
|
|
||||||
fn create_response(code: StatusCode, body: impl Serialize) -> Response {
|
|
||||||
Response::builder()
|
|
||||||
.status(code)
|
|
||||||
.header(CONTENT_TYPE.as_str(), "application/json")
|
|
||||||
.body(Body::from(serde_json::to_string(&body).unwrap()))
|
|
||||||
.unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a successful error response
|
|
||||||
pub(self) fn success(code: StatusCode, body: impl Serialize) -> Response {
|
|
||||||
assert!({
|
|
||||||
let code = code.as_u16();
|
|
||||||
|
|
||||||
(200..300).contains(&code)
|
|
||||||
});
|
|
||||||
|
|
||||||
Self::create_response(code, body)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create an error response
|
|
||||||
pub(self) fn error(code: StatusCode, error: impl ToString) -> Response {
|
|
||||||
assert!(code.as_u16() >= 400);
|
|
||||||
|
|
||||||
let message = error.to_string();
|
|
||||||
error!(message);
|
|
||||||
|
|
||||||
Self::create_response(code, &GenericAPIError { error: message })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create an error response related to the compute being in an invalid state
|
|
||||||
pub(self) fn invalid_status(status: ComputeStatus) -> Response {
|
|
||||||
Self::create_response(
|
|
||||||
StatusCode::PRECONDITION_FAILED,
|
|
||||||
&GenericAPIError {
|
|
||||||
error: format!("invalid compute status: {status}"),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ paths:
|
|||||||
schema:
|
schema:
|
||||||
$ref: "#/components/schemas/ComputeMetrics"
|
$ref: "#/components/schemas/ComputeMetrics"
|
||||||
|
|
||||||
/metrics:
|
/metrics
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- Info
|
- Info
|
||||||
@@ -537,14 +537,12 @@ components:
|
|||||||
properties:
|
properties:
|
||||||
extname:
|
extname:
|
||||||
type: string
|
type: string
|
||||||
version:
|
versions:
|
||||||
type: string
|
type: array
|
||||||
items:
|
items:
|
||||||
type: string
|
type: string
|
||||||
n_databases:
|
n_databases:
|
||||||
type: integer
|
type: integer
|
||||||
owned_by_superuser:
|
|
||||||
type: integer
|
|
||||||
|
|
||||||
SetRoleGrantsRequest:
|
SetRoleGrantsRequest:
|
||||||
type: object
|
type: object
|
||||||
|
|||||||
@@ -1,20 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use axum::{extract::State, response::Response};
|
|
||||||
use compute_api::responses::ComputeStatus;
|
|
||||||
use http::StatusCode;
|
|
||||||
|
|
||||||
use crate::{checker::check_writability, compute::ComputeNode, http::JsonResponse};
|
|
||||||
|
|
||||||
/// Check that the compute is currently running.
|
|
||||||
pub(in crate::http) async fn is_writable(State(compute): State<Arc<ComputeNode>>) -> Response {
|
|
||||||
let status = compute.get_status();
|
|
||||||
if status != ComputeStatus::Running {
|
|
||||||
return JsonResponse::invalid_status(status);
|
|
||||||
}
|
|
||||||
|
|
||||||
match check_writability(&compute).await {
|
|
||||||
Ok(_) => JsonResponse::success(StatusCode::OK, true),
|
|
||||||
Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,91 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use axum::{extract::State, response::Response};
|
|
||||||
use compute_api::{
|
|
||||||
requests::ConfigurationRequest,
|
|
||||||
responses::{ComputeStatus, ComputeStatusResponse},
|
|
||||||
};
|
|
||||||
use http::StatusCode;
|
|
||||||
use tokio::task;
|
|
||||||
use tracing::info;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
compute::{ComputeNode, ParsedSpec},
|
|
||||||
http::{extract::Json, JsonResponse},
|
|
||||||
};
|
|
||||||
|
|
||||||
// Accept spec in JSON format and request compute configuration. If anything
|
|
||||||
// goes wrong after we set the compute status to `ConfigurationPending` and
|
|
||||||
// update compute state with new spec, we basically leave compute in the
|
|
||||||
// potentially wrong state. That said, it's control-plane's responsibility to
|
|
||||||
// watch compute state after reconfiguration request and to clean restart in
|
|
||||||
// case of errors.
|
|
||||||
pub(in crate::http) async fn configure(
|
|
||||||
State(compute): State<Arc<ComputeNode>>,
|
|
||||||
request: Json<ConfigurationRequest>,
|
|
||||||
) -> Response {
|
|
||||||
if !compute.live_config_allowed {
|
|
||||||
return JsonResponse::error(
|
|
||||||
StatusCode::PRECONDITION_FAILED,
|
|
||||||
"live configuration is not allowed for this compute node".to_string(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let pspec = match ParsedSpec::try_from(request.spec.clone()) {
|
|
||||||
Ok(p) => p,
|
|
||||||
Err(e) => return JsonResponse::error(StatusCode::BAD_REQUEST, e),
|
|
||||||
};
|
|
||||||
|
|
||||||
// XXX: wrap state update under lock in a code block. Otherwise, we will try
|
|
||||||
// to `Send` `mut state` into the spawned thread bellow, which will cause
|
|
||||||
// the following rustc error:
|
|
||||||
//
|
|
||||||
// error: future cannot be sent between threads safely
|
|
||||||
{
|
|
||||||
let mut state = compute.state.lock().unwrap();
|
|
||||||
if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) {
|
|
||||||
return JsonResponse::invalid_status(state.status);
|
|
||||||
}
|
|
||||||
|
|
||||||
state.pspec = Some(pspec);
|
|
||||||
state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed);
|
|
||||||
drop(state);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Spawn a blocking thread to wait for compute to become Running. This is
|
|
||||||
// needed to do not block the main pool of workers and be able to serve
|
|
||||||
// other requests while some particular request is waiting for compute to
|
|
||||||
// finish configuration.
|
|
||||||
let c = compute.clone();
|
|
||||||
let completed = task::spawn_blocking(move || {
|
|
||||||
let mut state = c.state.lock().unwrap();
|
|
||||||
while state.status != ComputeStatus::Running {
|
|
||||||
state = c.state_changed.wait(state).unwrap();
|
|
||||||
info!(
|
|
||||||
"waiting for compute to become {}, current status: {}",
|
|
||||||
ComputeStatus::Running,
|
|
||||||
state.status
|
|
||||||
);
|
|
||||||
|
|
||||||
if state.status == ComputeStatus::Failed {
|
|
||||||
let err = state.error.as_ref().map_or("unknown error", |x| x);
|
|
||||||
let msg = format!("compute configuration failed: {:?}", err);
|
|
||||||
return Err(msg);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
if let Err(e) = completed {
|
|
||||||
return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return current compute state if everything went well.
|
|
||||||
let state = compute.state.lock().unwrap().clone();
|
|
||||||
let body = ComputeStatusResponse::from(&state);
|
|
||||||
|
|
||||||
JsonResponse::success(StatusCode::OK, body)
|
|
||||||
}
|
|
||||||
@@ -1,34 +0,0 @@
use std::sync::Arc;

use axum::{body::Body, extract::State, response::Response};
use http::{header::CONTENT_TYPE, StatusCode};
use serde::Deserialize;

use crate::{
    catalog::{get_database_schema, SchemaDumpError},
    compute::ComputeNode,
    http::{extract::Query, JsonResponse},
};

#[derive(Debug, Clone, Deserialize)]
pub(in crate::http) struct DatabaseSchemaParams {
    database: String,
}

/// Get a schema dump of the requested database.
pub(in crate::http) async fn get_schema_dump(
    params: Query<DatabaseSchemaParams>,
    State(compute): State<Arc<ComputeNode>>,
) -> Response {
    match get_database_schema(&compute, &params.database).await {
        Ok(schema) => Response::builder()
            .status(StatusCode::OK)
            .header(CONTENT_TYPE.as_str(), "application/json")
            .body(Body::from_stream(schema))
            .unwrap(),
        Err(SchemaDumpError::DatabaseDoesNotExist) => {
            JsonResponse::error(StatusCode::NOT_FOUND, SchemaDumpError::DatabaseDoesNotExist)
        }
        Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
    }
}
@@ -1,16 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use http::StatusCode;

use crate::{catalog::get_dbs_and_roles, compute::ComputeNode, http::JsonResponse};

/// Get the databases and roles from the compute.
pub(in crate::http) async fn get_catalog_objects(
    State(compute): State<Arc<ComputeNode>>,
) -> Response {
    match get_dbs_and_roles(&compute).await {
        Ok(catalog_objects) => JsonResponse::success(StatusCode::OK, catalog_objects),
        Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
    }
}
@@ -1,67 +0,0 @@
use std::sync::Arc;

use axum::{
    extract::State,
    response::{IntoResponse, Response},
};
use http::StatusCode;
use serde::Deserialize;

use crate::{
    compute::ComputeNode,
    http::{
        extract::{Path, Query},
        JsonResponse,
    },
};

#[derive(Debug, Clone, Deserialize)]
pub(in crate::http) struct ExtensionServerParams {
    is_library: Option<bool>,
}

/// Download a remote extension.
pub(in crate::http) async fn download_extension(
    Path(filename): Path<String>,
    params: Query<ExtensionServerParams>,
    State(compute): State<Arc<ComputeNode>>,
) -> Response {
    // Don't even try to download extensions if no remote storage is configured
    if compute.ext_remote_storage.is_none() {
        return JsonResponse::error(
            StatusCode::PRECONDITION_FAILED,
            "remote storage is not configured",
        );
    }

    let ext = {
        let state = compute.state.lock().unwrap();
        let pspec = state.pspec.as_ref().unwrap();
        let spec = &pspec.spec;

        let remote_extensions = match spec.remote_extensions.as_ref() {
            Some(r) => r,
            None => {
                return JsonResponse::error(
                    StatusCode::CONFLICT,
                    "information about remote extensions is unavailable",
                );
            }
        };

        remote_extensions.get_ext(
            &filename,
            params.is_library.unwrap_or(false),
            &compute.build_tag,
            &compute.pgversion,
        )
    };

    match ext {
        Ok((ext_name, ext_path)) => match compute.download_extension(ext_name, ext_path).await {
            Ok(_) => StatusCode::OK.into_response(),
            Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
        },
        Err(e) => JsonResponse::error(StatusCode::NOT_FOUND, e),
    }
}
@@ -1,45 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use compute_api::{
    requests::ExtensionInstallRequest,
    responses::{ComputeStatus, ExtensionInstallResponse},
};
use http::StatusCode;

use crate::{
    compute::ComputeNode,
    http::{extract::Json, JsonResponse},
};

/// Install an extension.
pub(in crate::http) async fn install_extension(
    State(compute): State<Arc<ComputeNode>>,
    request: Json<ExtensionInstallRequest>,
) -> Response {
    let status = compute.get_status();
    if status != ComputeStatus::Running {
        return JsonResponse::invalid_status(status);
    }

    match compute
        .install_extension(
            &request.extension,
            &request.database,
            request.version.to_string(),
        )
        .await
    {
        Ok(version) => JsonResponse::success(
            StatusCode::CREATED,
            Some(ExtensionInstallResponse {
                extension: request.extension.clone(),
                version,
            }),
        ),
        Err(e) => JsonResponse::error(
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("failed to install extension: {e}"),
        ),
    }
}
@@ -1,35 +0,0 @@
use axum::response::{IntoResponse, Response};
use http::StatusCode;
use tracing::info;
use utils::failpoint_support::{apply_failpoint, ConfigureFailpointsRequest};

use crate::http::{extract::Json, JsonResponse};

/// Configure failpoints for testing purposes.
pub(in crate::http) async fn configure_failpoints(
    failpoints: Json<ConfigureFailpointsRequest>,
) -> Response {
    if !fail::has_failpoints() {
        return JsonResponse::error(
            StatusCode::PRECONDITION_FAILED,
            "Cannot manage failpoints because neon was compiled without failpoints support",
        );
    }

    for fp in &*failpoints {
        info!("cfg failpoint: {} {}", fp.name, fp.actions);

        // We recognize one extra "action" that's not natively recognized
        // by the failpoints crate: exit, to immediately kill the process
        let cfg_result = apply_failpoint(&fp.name, &fp.actions);

        if let Err(e) = cfg_result {
            return JsonResponse::error(
                StatusCode::BAD_REQUEST,
                format!("failed to configure failpoints: {e}"),
            );
        }
    }

    StatusCode::OK.into_response()
}
@@ -1,48 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use compute_api::{
    requests::SetRoleGrantsRequest,
    responses::{ComputeStatus, SetRoleGrantsResponse},
};
use http::StatusCode;

use crate::{
    compute::ComputeNode,
    http::{extract::Json, JsonResponse},
};

/// Add grants for a role.
pub(in crate::http) async fn add_grant(
    State(compute): State<Arc<ComputeNode>>,
    request: Json<SetRoleGrantsRequest>,
) -> Response {
    let status = compute.get_status();
    if status != ComputeStatus::Running {
        return JsonResponse::invalid_status(status);
    }

    match compute
        .set_role_grants(
            &request.database,
            &request.schema,
            &request.privileges,
            &request.role,
        )
        .await
    {
        Ok(()) => JsonResponse::success(
            StatusCode::CREATED,
            Some(SetRoleGrantsResponse {
                database: request.database.clone(),
                schema: request.schema.clone(),
                role: request.role.clone(),
                privileges: request.privileges.clone(),
            }),
        ),
        Err(e) => JsonResponse::error(
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("failed to grant role privileges to the schema: {e}"),
        ),
    }
}
@@ -1,11 +0,0 @@
use axum::response::Response;
use compute_api::responses::InfoResponse;
use http::StatusCode;

use crate::http::JsonResponse;

/// Get information about the physical characteristics of the compute.
pub(in crate::http) async fn get_info() -> Response {
    let num_cpus = num_cpus::get_physical();
    JsonResponse::success(StatusCode::OK, &InfoResponse { num_cpus })
}
@@ -1,18 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use compute_api::responses::ComputeStatus;
use http::StatusCode;

use crate::{compute::ComputeNode, http::JsonResponse};

/// Collect current Postgres usage insights.
pub(in crate::http) async fn get_insights(State(compute): State<Arc<ComputeNode>>) -> Response {
    let status = compute.get_status();
    if status != ComputeStatus::Running {
        return JsonResponse::invalid_status(status);
    }

    let insights = compute.collect_insights().await;
    JsonResponse::success(StatusCode::OK, insights)
}
@@ -1,33 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use compute_api::responses::ComputeStatus;
use http::StatusCode;
use tokio::task;

use crate::{compute::ComputeNode, http::JsonResponse, installed_extensions};

/// Get a list of installed extensions.
pub(in crate::http) async fn get_installed_extensions(
    State(compute): State<Arc<ComputeNode>>,
) -> Response {
    let status = compute.get_status();
    if status != ComputeStatus::Running {
        return JsonResponse::invalid_status(status);
    }

    let conf = compute.get_conn_conf(None);
    let res = task::spawn_blocking(move || installed_extensions::get_installed_extensions(conf))
        .await
        .unwrap();

    match res {
        Ok(installed_extensions) => {
            JsonResponse::success(StatusCode::OK, Some(installed_extensions))
        }
        Err(e) => JsonResponse::error(
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("failed to get list of installed extensions: {e}"),
        ),
    }
}
@@ -1,32 +0,0 @@
use axum::{body::Body, response::Response};
use http::header::CONTENT_TYPE;
use http::StatusCode;
use metrics::proto::MetricFamily;
use metrics::Encoder;
use metrics::TextEncoder;

use crate::{http::JsonResponse, installed_extensions};

/// Expose Prometheus metrics.
pub(in crate::http) async fn get_metrics() -> Response {
    // When we call TextEncoder::encode() below, it will immediately return an
    // error if a metric family has no metrics, so we need to preemptively
    // filter out metric families with no metrics.
    let metrics = installed_extensions::collect()
        .into_iter()
        .filter(|m| !m.get_metric().is_empty())
        .collect::<Vec<MetricFamily>>();

    let encoder = TextEncoder::new();
    let mut buffer = vec![];

    if let Err(e) = encoder.encode(&metrics, &mut buffer) {
        return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e);
    }

    Response::builder()
        .status(StatusCode::OK)
        .header(CONTENT_TYPE, encoder.format_type())
        .body(Body::from(buffer))
        .unwrap()
}
@@ -1,12 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use http::StatusCode;

use crate::{compute::ComputeNode, http::JsonResponse};

/// Get startup metrics.
pub(in crate::http) async fn get_metrics(State(compute): State<Arc<ComputeNode>>) -> Response {
    let metrics = compute.state.lock().unwrap().metrics.clone();
    JsonResponse::success(StatusCode::OK, metrics)
}
@@ -1,38 +0,0 @@
use compute_api::responses::ComputeStatusResponse;

use crate::compute::ComputeState;

pub(in crate::http) mod check_writability;
pub(in crate::http) mod configure;
pub(in crate::http) mod database_schema;
pub(in crate::http) mod dbs_and_roles;
pub(in crate::http) mod extension_server;
pub(in crate::http) mod extensions;
pub(in crate::http) mod failpoints;
pub(in crate::http) mod grants;
pub(in crate::http) mod info;
pub(in crate::http) mod insights;
pub(in crate::http) mod installed_extensions;
pub(in crate::http) mod metrics;
pub(in crate::http) mod metrics_json;
pub(in crate::http) mod status;
pub(in crate::http) mod terminate;

impl From<&ComputeState> for ComputeStatusResponse {
    fn from(state: &ComputeState) -> Self {
        ComputeStatusResponse {
            start_time: state.start_time,
            tenant: state
                .pspec
                .as_ref()
                .map(|pspec| pspec.tenant_id.to_string()),
            timeline: state
                .pspec
                .as_ref()
                .map(|pspec| pspec.timeline_id.to_string()),
            status: state.status,
            last_active: state.last_active,
            error: state.error.clone(),
        }
    }
}
@@ -1,14 +0,0 @@
use std::{ops::Deref, sync::Arc};

use axum::{extract::State, http::StatusCode, response::Response};
use compute_api::responses::ComputeStatusResponse;

use crate::{compute::ComputeNode, http::JsonResponse};

/// Retrieve the state of the compute.
pub(in crate::http) async fn get_status(State(compute): State<Arc<ComputeNode>>) -> Response {
    let state = compute.state.lock().unwrap();
    let body = ComputeStatusResponse::from(state.deref());

    JsonResponse::success(StatusCode::OK, body)
}
@@ -1,58 +0,0 @@
use std::sync::Arc;

use axum::{
    extract::State,
    response::{IntoResponse, Response},
};
use compute_api::responses::ComputeStatus;
use http::StatusCode;
use tokio::task;
use tracing::info;

use crate::{
    compute::{forward_termination_signal, ComputeNode},
    http::JsonResponse,
};

/// Terminate the compute.
pub(in crate::http) async fn terminate(State(compute): State<Arc<ComputeNode>>) -> Response {
    {
        let mut state = compute.state.lock().unwrap();
        if state.status == ComputeStatus::Terminated {
            return StatusCode::CREATED.into_response();
        }

        if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) {
            return JsonResponse::invalid_status(state.status);
        }

        state.set_status(ComputeStatus::TerminationPending, &compute.state_changed);
        drop(state);
    }

    forward_termination_signal();
    info!("sent signal and notified waiters");

    // Spawn a blocking thread to wait for compute to become Terminated.
    // This is needed so we do not block the main pool of workers and
    // can still serve other requests while this particular request
    // is waiting for compute to finish terminating.
    let c = compute.clone();
    task::spawn_blocking(move || {
        let mut state = c.state.lock().unwrap();
        while state.status != ComputeStatus::Terminated {
            state = c.state_changed.wait(state).unwrap();
            info!(
                "waiting for compute to become {}, current status: {:?}",
                ComputeStatus::Terminated,
                state.status
            );
        }
    })
    .await
    .unwrap();

    info!("terminated Postgres");

    StatusCode::OK.into_response()
}
@@ -1,165 +0,0 @@
use std::{
    net::{IpAddr, Ipv6Addr, SocketAddr},
    sync::{
        atomic::{AtomicU64, Ordering},
        Arc,
    },
    thread,
    time::Duration,
};

use anyhow::Result;
use axum::{
    response::{IntoResponse, Response},
    routing::{get, post},
    Router,
};
use http::StatusCode;
use tokio::net::TcpListener;
use tower::ServiceBuilder;
use tower_http::{
    request_id::{MakeRequestId, PropagateRequestIdLayer, RequestId, SetRequestIdLayer},
    trace::TraceLayer,
};
use tracing::{debug, error, info, Span};

use super::routes::{
    check_writability, configure, database_schema, dbs_and_roles, extension_server, extensions,
    grants, info as info_route, insights, installed_extensions, metrics, metrics_json, status,
    terminate,
};
use crate::compute::ComputeNode;

async fn handle_404() -> Response {
    StatusCode::NOT_FOUND.into_response()
}

#[derive(Clone, Default)]
struct ComputeMakeRequestId(Arc<AtomicU64>);

impl MakeRequestId for ComputeMakeRequestId {
    fn make_request_id<B>(
        &mut self,
        _request: &http::Request<B>,
    ) -> Option<tower_http::request_id::RequestId> {
        let request_id = self
            .0
            .fetch_add(1, Ordering::SeqCst)
            .to_string()
            .parse()
            .unwrap();

        Some(RequestId::new(request_id))
    }
}

/// Run the HTTP server and wait on it forever.
#[tokio::main]
async fn serve(port: u16, compute: Arc<ComputeNode>) {
    const X_REQUEST_ID: &str = "x-request-id";

    let mut app = Router::new()
        .route("/check_writability", post(check_writability::is_writable))
        .route("/configure", post(configure::configure))
        .route("/database_schema", get(database_schema::get_schema_dump))
        .route("/dbs_and_roles", get(dbs_and_roles::get_catalog_objects))
        .route(
            "/extension_server/*filename",
            post(extension_server::download_extension),
        )
        .route("/extensions", post(extensions::install_extension))
        .route("/grants", post(grants::add_grant))
        .route("/info", get(info_route::get_info))
        .route("/insights", get(insights::get_insights))
        .route(
            "/installed_extensions",
            get(installed_extensions::get_installed_extensions),
        )
        .route("/metrics", get(metrics::get_metrics))
        .route("/metrics.json", get(metrics_json::get_metrics))
        .route("/status", get(status::get_status))
        .route("/terminate", post(terminate::terminate))
        .fallback(handle_404)
        .layer(
            ServiceBuilder::new()
                .layer(SetRequestIdLayer::x_request_id(
                    ComputeMakeRequestId::default(),
                ))
                .layer(
                    TraceLayer::new_for_http()
                        .on_request(|request: &http::Request<_>, _span: &Span| {
                            let request_id = request
                                .headers()
                                .get(X_REQUEST_ID)
                                .unwrap()
                                .to_str()
                                .unwrap();

                            match request.uri().path() {
                                "/metrics" => {
                                    debug!(%request_id, "{} {}", request.method(), request.uri())
                                }
                                _ => info!(%request_id, "{} {}", request.method(), request.uri()),
                            };
                        })
                        .on_response(
                            |response: &http::Response<_>, latency: Duration, _span: &Span| {
                                let request_id = response
                                    .headers()
                                    .get(X_REQUEST_ID)
                                    .unwrap()
                                    .to_str()
                                    .unwrap();

                                info!(
                                    %request_id,
                                    code = response.status().as_u16(),
                                    latency = latency.as_millis()
                                )
                            },
                        ),
                )
                .layer(PropagateRequestIdLayer::x_request_id()),
        )
        .with_state(compute);

    // Add in any testing support
    if cfg!(feature = "testing") {
        use super::routes::failpoints;

        app = app.route("/failpoints", post(failpoints::configure_failpoints))
    }

    // This usually binds to both IPv4 and IPv6 on Linux, see
    // https://github.com/rust-lang/rust/pull/34440 for more information
    let addr = SocketAddr::new(IpAddr::from(Ipv6Addr::UNSPECIFIED), port);
    let listener = match TcpListener::bind(&addr).await {
        Ok(listener) => listener,
        Err(e) => {
            error!(
                "failed to bind the compute_ctl HTTP server to port {}: {}",
                port, e
            );
            return;
        }
    };

    if let Ok(local_addr) = listener.local_addr() {
        info!("compute_ctl HTTP server listening on {}", local_addr);
    } else {
        info!("compute_ctl HTTP server listening on port {}", port);
    }

    if let Err(e) = axum::serve(listener, app).await {
        error!("compute_ctl HTTP server error: {}", e);
    }
}

/// Launch a separate HTTP server thread and return its `JoinHandle`.
pub fn launch_http_server(port: u16, state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
    let state = Arc::clone(state);

    Ok(thread::Builder::new()
        .name("http-server".into())
        .spawn(move || serve(port, state))?)
}
@@ -1,6 +1,7 @@
 use compute_api::responses::{InstalledExtension, InstalledExtensions};
 use metrics::proto::MetricFamily;
 use std::collections::HashMap;
+use std::collections::HashSet;

 use anyhow::Result;
 use postgres::{Client, NoTls};
@@ -37,77 +38,61 @@ fn list_dbs(client: &mut Client) -> Result<Vec<String>> {
 /// Connect to every database (see list_dbs above) and get the list of installed extensions.
 ///
 /// Same extension can be installed in multiple databases with different versions,
-/// so we report a separate metric (number of databases where it is installed)
-/// for each extension version.
+/// we only keep the highest and lowest version across all databases.
 pub fn get_installed_extensions(mut conf: postgres::config::Config) -> Result<InstalledExtensions> {
     conf.application_name("compute_ctl:get_installed_extensions");
     let mut client = conf.connect(NoTls)?;

     let databases: Vec<String> = list_dbs(&mut client)?;

-    let mut extensions_map: HashMap<(String, String, String), InstalledExtension> = HashMap::new();
+    let mut extensions_map: HashMap<String, InstalledExtension> = HashMap::new();
     for db in databases.iter() {
         conf.dbname(db);
         let mut db_client = conf.connect(NoTls)?;
-        let extensions: Vec<(String, String, i32)> = db_client
+        let extensions: Vec<(String, String)> = db_client
             .query(
-                "SELECT extname, extversion, extowner::integer FROM pg_catalog.pg_extension",
+                "SELECT extname, extversion FROM pg_catalog.pg_extension;",
                 &[],
             )?
             .iter()
-            .map(|row| {
-                (
-                    row.get("extname"),
-                    row.get("extversion"),
-                    row.get("extowner"),
-                )
-            })
+            .map(|row| (row.get("extname"), row.get("extversion")))
             .collect();

-        for (extname, v, extowner) in extensions.iter() {
+        for (extname, v) in extensions.iter() {
             let version = v.to_string();

-            // check if the extension is owned by superuser
-            // 10 is the oid of superuser
-            let owned_by_superuser = if *extowner == 10 { "1" } else { "0" };
+            // increment the number of databases where the version of extension is installed
+            INSTALLED_EXTENSIONS
+                .with_label_values(&[extname, &version])
+                .inc();

             extensions_map
-                .entry((
-                    extname.to_string(),
-                    version.clone(),
-                    owned_by_superuser.to_string(),
-                ))
+                .entry(extname.to_string())
                 .and_modify(|e| {
+                    e.versions.insert(version.clone());
                     // count the number of databases where the extension is installed
                     e.n_databases += 1;
                 })
                 .or_insert(InstalledExtension {
                     extname: extname.to_string(),
-                    version: version.clone(),
+                    versions: HashSet::from([version.clone()]),
                     n_databases: 1,
-                    owned_by_superuser: owned_by_superuser.to_string(),
                 });
         }
     }

-    for (key, ext) in extensions_map.iter() {
-        let (extname, version, owned_by_superuser) = key;
-        let n_databases = ext.n_databases as u64;
-
-        INSTALLED_EXTENSIONS
-            .with_label_values(&[extname, version, owned_by_superuser])
-            .set(n_databases);
-    }
-
-    Ok(InstalledExtensions {
+    let res = InstalledExtensions {
         extensions: extensions_map.into_values().collect(),
-    })
+    };
+
+    Ok(res)
 }

 static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
     register_uint_gauge_vec!(
         "compute_installed_extensions",
         "Number of databases where the version of extension is installed",
-        &["extension_name", "version", "owned_by_superuser"]
+        &["extension_name", "version"]
     )
     .expect("failed to define a metric")
 });
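To make the metric shape discussed in the hunk above concrete, here is a small, self-contained sketch of a gauge with the two-label variant (`extension_name`, `version`) and the final text encoding. It is an illustration only: it uses the open-source `prometheus` crate directly rather than the crate-internal `metrics` re-export and `UIntGaugeVec` type seen in the diff, and the extension name, version, and count are made-up values.

use prometheus::{Encoder, IntGaugeVec, Opts, Registry, TextEncoder};

fn main() {
    let registry = Registry::new();
    let gauge = IntGaugeVec::new(
        Opts::new(
            "compute_installed_extensions",
            "Number of databases where the version of extension is installed",
        ),
        &["extension_name", "version"],
    )
    .unwrap();
    registry.register(Box::new(gauge.clone())).unwrap();

    // Pretend this extension/version pair was found in three databases.
    gauge
        .with_label_values(&["pg_stat_statements", "1.10"])
        .set(3);

    // Same encode-to-text step as the /metrics handler shown earlier.
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&registry.gather(), &mut buffer)
        .unwrap();
    println!("{}", String::from_utf8(buffer).unwrap());
}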
@@ -3,6 +3,8 @@
 #![deny(unsafe_code)]
 #![deny(clippy::undocumented_unsafe_blocks)]

+extern crate hyper0 as hyper;
+
 pub mod checker;
 pub mod config;
 pub mod configurator;
@@ -1,16 +1,13 @@
 use anyhow::{Context, Result};
-use fail::fail_point;
-use postgres::{Client, Transaction};
+use postgres::Client;
 use tracing::info;

-/// Runs a series of migrations on a target database
 pub(crate) struct MigrationRunner<'m> {
     client: &'m mut Client,
     migrations: &'m [&'m str],
 }

 impl<'m> MigrationRunner<'m> {
-    /// Create a new migration runner
     pub fn new(client: &'m mut Client, migrations: &'m [&'m str]) -> Self {
         // The neon_migration.migration_id::id column is a bigint, which is equivalent to an i64
         assert!(migrations.len() + 1 < i64::MAX as usize);
@@ -18,110 +15,87 @@ impl<'m> MigrationRunner<'m> {
         Self { client, migrations }
     }

-    /// Get the current value neon_migration.migration_id
     fn get_migration_id(&mut self) -> Result<i64> {
+        let query = "SELECT id FROM neon_migration.migration_id";
         let row = self
             .client
-            .query_one("SELECT id FROM neon_migration.migration_id", &[])?;
+            .query_one(query, &[])
+            .context("run_migrations get migration_id")?;

         Ok(row.get::<&str, i64>("id"))
     }

-    /// Update the neon_migration.migration_id value
-    ///
-    /// This function has a fail point called compute-migration, which can be
-    /// used if you would like to fail the application of a series of migrations
-    /// at some point.
-    fn update_migration_id(txn: &mut Transaction, migration_id: i64) -> Result<()> {
-        // We use this fail point in order to check that failing in the
-        // middle of applying a series of migrations fails in an expected
-        // manner
-        if cfg!(feature = "testing") {
-            let fail = (|| {
-                fail_point!("compute-migration", |fail_migration_id| {
-                    migration_id == fail_migration_id.unwrap().parse::<i64>().unwrap()
-                });
-
-                false
-            })();
-
-            if fail {
-                return Err(anyhow::anyhow!(format!(
-                    "migration {} was configured to fail because of a failpoint",
-                    migration_id
-                )));
-            }
-        }
-
-        txn.query(
-            "UPDATE neon_migration.migration_id SET id = $1",
-            &[&migration_id],
-        )
-        .with_context(|| format!("update neon_migration.migration_id to {migration_id}"))?;
+    fn update_migration_id(&mut self, migration_id: i64) -> Result<()> {
+        let setval = format!("UPDATE neon_migration.migration_id SET id={}", migration_id);
+
+        self.client
+            .simple_query(&setval)
+            .context("run_migrations update id")?;

         Ok(())
     }

-    /// Prepare the migrations the target database for handling migrations
-    fn prepare_database(&mut self) -> Result<()> {
-        self.client
-            .simple_query("CREATE SCHEMA IF NOT EXISTS neon_migration")?;
-        self.client.simple_query("CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)")?;
-        self.client.simple_query(
-            "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING",
-        )?;
-        self.client
-            .simple_query("ALTER SCHEMA neon_migration OWNER TO cloud_admin")?;
-        self.client
-            .simple_query("REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC")?;
+    fn prepare_migrations(&mut self) -> Result<()> {
+        let query = "CREATE SCHEMA IF NOT EXISTS neon_migration";
+        self.client.simple_query(query)?;
+
+        let query = "CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)";
+        self.client.simple_query(query)?;
+
+        let query = "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING";
+        self.client.simple_query(query)?;
+
+        let query = "ALTER SCHEMA neon_migration OWNER TO cloud_admin";
+        self.client.simple_query(query)?;
+
+        let query = "REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC";
+        self.client.simple_query(query)?;

         Ok(())
     }

-    /// Run an individual migration
-    fn run_migration(txn: &mut Transaction, migration_id: i64, migration: &str) -> Result<()> {
-        if migration.starts_with("-- SKIP") {
-            info!("Skipping migration id={}", migration_id);
-
-            // Even though we are skipping the migration, updating the
-            // migration ID should help keep logic easy to understand when
-            // trying to understand the state of a cluster.
-            Self::update_migration_id(txn, migration_id)?;
-        } else {
-            info!("Running migration id={}:\n{}\n", migration_id, migration);
-
-            txn.simple_query(migration)
-                .with_context(|| format!("apply migration {migration_id}"))?;
-
-            Self::update_migration_id(txn, migration_id)?;
-        }
-
-        Ok(())
-    }
-
-    /// Run the configured set of migrations
     pub fn run_migrations(mut self) -> Result<()> {
-        self.prepare_database()
-            .context("prepare database to handle migrations")?;
+        self.prepare_migrations()?;

         let mut current_migration = self.get_migration_id()? as usize;
         while current_migration < self.migrations.len() {
-            // The index lags the migration ID by 1, so the current migration
-            // ID is also the next index
-            let migration_id = (current_migration + 1) as i64;
+            macro_rules! migration_id {
+                ($cm:expr) => {
+                    ($cm + 1) as i64
+                };
+            }

-            let mut txn = self
-                .client
-                .transaction()
-                .with_context(|| format!("begin transaction for migration {migration_id}"))?;
+            let migration = self.migrations[current_migration];

-            Self::run_migration(&mut txn, migration_id, self.migrations[current_migration])
-                .with_context(|| format!("running migration {migration_id}"))?;
+            if migration.starts_with("-- SKIP") {
+                info!("Skipping migration id={}", migration_id!(current_migration));
+            } else {
+                info!(
+                    "Running migration id={}:\n{}\n",
+                    migration_id!(current_migration),
+                    migration
+                );

-            txn.commit()
-                .with_context(|| format!("commit transaction for migration {migration_id}"))?;
+                self.client
+                    .simple_query("BEGIN")
+                    .context("begin migration")?;

-            info!("Finished migration id={}", migration_id);
+                self.client.simple_query(migration).with_context(|| {
+                    format!(
+                        "run_migrations migration id={}",
+                        migration_id!(current_migration)
+                    )
+                })?;
+
+                // Migration IDs start at 1
+                self.update_migration_id(migration_id!(current_migration))?;
+
+                self.client
+                    .simple_query("COMMIT")
+                    .context("commit migration")?;
+
+                info!("Finished migration id={}", migration_id!(current_migration));
+            }

             current_migration += 1;
         }
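As a reading aid for the migration-runner hunk above, here is a standalone sketch of the same bookkeeping pattern on the plus side of the diff: each migration is wrapped in its own BEGIN/COMMIT pair and a persistent migration id in `neon_migration.migration_id` records progress. This is not the crate's `MigrationRunner`; it omits the "-- SKIP" handling and the cloud_admin ownership / REVOKE hardening, and the connection string and migration bodies are placeholders.

use anyhow::{Context, Result};
use postgres::{Client, NoTls};

fn run_migrations(client: &mut Client, migrations: &[&str]) -> Result<()> {
    // Bookkeeping schema/table, mirroring prepare_migrations() above.
    client.simple_query("CREATE SCHEMA IF NOT EXISTS neon_migration")?;
    client.simple_query(
        "CREATE TABLE IF NOT EXISTS neon_migration.migration_id \
         (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)",
    )?;
    client.simple_query(
        "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING",
    )?;

    let row = client
        .query_one("SELECT id FROM neon_migration.migration_id", &[])
        .context("read migration id")?;
    let mut current = row.get::<&str, i64>("id") as usize;

    while current < migrations.len() {
        let migration_id = (current + 1) as i64;

        // One transaction per migration: apply it and bump the id together.
        client.simple_query("BEGIN")?;
        client
            .simple_query(migrations[current])
            .with_context(|| format!("apply migration {migration_id}"))?;
        client.simple_query(&format!(
            "UPDATE neon_migration.migration_id SET id={migration_id}"
        ))?;
        client.simple_query("COMMIT")?;

        current += 1;
    }

    Ok(())
}

fn main() -> Result<()> {
    // Placeholder connection string.
    let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
    run_migrations(&mut client, &["CREATE TABLE IF NOT EXISTS t (id int)"])?;
    Ok(())
}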
@@ -1,9 +0,0 @@
DO $$
DECLARE
    bypassrls boolean;
BEGIN
    SELECT rolbypassrls INTO bypassrls FROM pg_roles WHERE rolname = 'neon_superuser';
    IF NOT bypassrls THEN
        RAISE EXCEPTION 'neon_superuser cannot bypass RLS';
    END IF;
END $$;
@@ -1,25 +0,0 @@
DO $$
DECLARE
    role record;
BEGIN
    FOR role IN
        SELECT rolname AS name, rolinherit AS inherit
        FROM pg_roles
        WHERE pg_has_role(rolname, 'neon_superuser', 'member')
    LOOP
        IF NOT role.inherit THEN
            RAISE EXCEPTION '% cannot inherit', quote_ident(role.name);
        END IF;
    END LOOP;

    FOR role IN
        SELECT rolname AS name, rolbypassrls AS bypassrls
        FROM pg_roles
        WHERE NOT pg_has_role(rolname, 'neon_superuser', 'member')
            AND NOT starts_with(rolname, 'pg_')
    LOOP
        IF role.bypassrls THEN
            RAISE EXCEPTION '% can bypass RLS', quote_ident(role.name);
        END IF;
    END LOOP;
END $$;
@@ -1,10 +0,0 @@
DO $$
BEGIN
    IF (SELECT current_setting('server_version_num')::numeric < 160000) THEN
        RETURN;
    END IF;

    IF NOT (SELECT pg_has_role('neon_superuser', 'pg_create_subscription', 'member')) THEN
        RAISE EXCEPTION 'neon_superuser cannot execute pg_create_subscription';
    END IF;
END $$;
@@ -1,19 +0,0 @@
DO $$
DECLARE
    monitor record;
BEGIN
    SELECT pg_has_role('neon_superuser', 'pg_monitor', 'member') AS member,
           admin_option AS admin
    INTO monitor
    FROM pg_auth_members
    WHERE roleid = 'pg_monitor'::regrole
        AND member = 'pg_monitor'::regrole;

    IF NOT monitor.member THEN
        RAISE EXCEPTION 'neon_superuser is not a member of pg_monitor';
    END IF;

    IF NOT monitor.admin THEN
        RAISE EXCEPTION 'neon_superuser cannot grant pg_monitor';
    END IF;
END $$;
@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.
@@ -1,13 +0,0 @@
DO $$
DECLARE
    can_execute boolean;
BEGIN
    SELECT bool_and(has_function_privilege('neon_superuser', oid, 'execute'))
    INTO can_execute
    FROM pg_proc
    WHERE proname IN ('pg_export_snapshot', 'pg_log_standby_snapshot')
        AND pronamespace = 'pg_catalog'::regnamespace;
    IF NOT can_execute THEN
        RAISE EXCEPTION 'neon_superuser cannot execute both pg_export_snapshot and pg_log_standby_snapshot';
    END IF;
END $$;
@@ -1,13 +0,0 @@
DO $$
DECLARE
    can_execute boolean;
BEGIN
    SELECT has_function_privilege('neon_superuser', oid, 'execute')
    INTO can_execute
    FROM pg_proc
    WHERE proname = 'pg_show_replication_origin_status'
        AND pronamespace = 'pg_catalog'::regnamespace;
    IF NOT can_execute THEN
        RAISE EXCEPTION 'neon_superuser cannot execute pg_show_replication_origin_status';
    END IF;
END $$;
@@ -47,7 +47,6 @@ pub enum PerDatabasePhase {
     DeleteDBRoleReferences,
     ChangeSchemaPerms,
     HandleAnonExtension,
-    DropSubscriptionsForDeletedDatabases,
 }

 #[derive(Clone, Debug)]
@@ -75,7 +74,7 @@ pub struct MutableApplyContext {
     pub dbs: HashMap<String, Database>,
 }

-/// Apply the operations that belong to the given spec apply phase.
+/// Appply the operations that belong to the given spec apply phase.
 ///
 /// Commands within a single phase are executed in order of Iterator yield.
 /// Commands of ApplySpecPhase::RunInEachDatabase will execute in the database
@@ -327,12 +326,13 @@ async fn get_operations<'a>(

             // Use FORCE to drop database even if there are active connections.
             // We run this from `cloud_admin`, so it should have enough privileges.
-            //
             // NB: there could be other db states, which prevent us from dropping
             // the database. For example, if db is used by any active subscription
             // or replication slot.
-            // Such cases are handled in the DropSubscriptionsForDeletedDatabases
-            // phase. We do all the cleanup before actually dropping the database.
+            // TODO: deal with it once we allow logical replication. Proper fix should
+            // involve returning an error code to the control plane, so it could
+            // figure out that this is a non-retryable error, return it to the user
+            // and fail operation permanently.
             let drop_db_query: String = format!(
                 "DROP DATABASE IF EXISTS {} WITH (FORCE)",
                 &op.name.pg_quote()
@@ -444,30 +444,6 @@ async fn get_operations<'a>(
         }
         ApplySpecPhase::RunInEachDatabase { db, subphase } => {
             match subphase {
-                PerDatabasePhase::DropSubscriptionsForDeletedDatabases => {
-                    match &db {
-                        DB::UserDB(db) => {
-                            let drop_subscription_query: String = format!(
-                                include_str!("sql/drop_subscription_for_drop_dbs.sql"),
-                                datname_str = escape_literal(&db.name),
-                            );
-
-                            let operations = vec![Operation {
-                                query: drop_subscription_query,
-                                comment: Some(format!(
-                                    "optionally dropping subscriptions for DB {}",
-                                    db.name,
-                                )),
-                            }]
-                            .into_iter();
-
-                            Ok(Box::new(operations))
-                        }
-                        // skip this cleanup for the system databases
-                        // because users can't drop them
-                        DB::SystemDB => Ok(Box::new(empty())),
-                    }
-                }
                 PerDatabasePhase::DeleteDBRoleReferences => {
                     let ctx = ctx.read().await;

@@ -498,19 +474,7 @@ async fn get_operations<'a>(
                     ),
                     comment: None,
                 },
-                // Revoke some potentially blocking privileges (Neon-specific currently)
-                Operation {
-                    query: format!(
-                        include_str!("sql/pre_drop_role_revoke_privileges.sql"),
-                        role_name = quoted,
-                    ),
-                    comment: None,
-                },
                 // This now will only drop privileges of the role
-                // TODO: this is obviously not 100% true because of the above case,
-                // there could be still some privileges that are not revoked. Maybe this
-                // only drops privileges that were granted *by this* role, not *to this* role,
-                // but this has to be checked.
                 Operation {
                     query: format!("DROP OWNED BY {}", quoted),
                     comment: None,
@@ -1,11 +0,0 @@
DO $$
DECLARE
    subname TEXT;
BEGIN
    FOR subname IN SELECT pg_subscription.subname FROM pg_subscription WHERE subdbid = (SELECT oid FROM pg_database WHERE datname = {datname_str}) LOOP
        EXECUTE format('ALTER SUBSCRIPTION %I DISABLE;', subname);
        EXECUTE format('ALTER SUBSCRIPTION %I SET (slot_name = NONE);', subname);
        EXECUTE format('DROP SUBSCRIPTION %I;', subname);
    END LOOP;
END;
$$;
@@ -1,28 +0,0 @@
SET SESSION ROLE neon_superuser;

DO $$
DECLARE
    schema TEXT;
    revoke_query TEXT;
BEGIN
    FOR schema IN
        SELECT schema_name
        FROM information_schema.schemata
        -- So far, we only had issues with 'public' schema. Probably, because we do some additional grants,
        -- e.g., make DB owner the owner of 'public' schema automatically (when created via API).
        -- See https://github.com/neondatabase/cloud/issues/13582 for the context.
        -- Still, keep the loop because i) it efficiently handles the case when there is no 'public' schema,
        -- ii) it's easy to add more schemas to the list if needed.
        WHERE schema_name IN ('public')
    LOOP
        revoke_query := format(
            'REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %I FROM {role_name} GRANTED BY neon_superuser;',
            schema
        );

        EXECUTE revoke_query;
    END LOOP;
END;
$$;

RESET ROLE;
@@ -274,7 +274,6 @@ fn fill_remote_storage_secrets_vars(mut cmd: &mut Command) -> &mut Command {
     for env_key in [
         "AWS_ACCESS_KEY_ID",
         "AWS_SECRET_ACCESS_KEY",
-        "AWS_SESSION_TOKEN",
         "AWS_PROFILE",
         // HOME is needed in combination with `AWS_PROFILE` to pick up the SSO sessions.
         "HOME",
@@ -19,7 +19,6 @@ use control_plane::storage_controller::{
     NeonStorageControllerStartArgs, NeonStorageControllerStopArgs, StorageController,
 };
 use control_plane::{broker, local_env};
-use nix::fcntl::{flock, FlockArg};
 use pageserver_api::config::{
     DEFAULT_HTTP_LISTEN_PORT as DEFAULT_PAGESERVER_HTTP_PORT,
     DEFAULT_PG_LISTEN_PORT as DEFAULT_PAGESERVER_PG_PORT,
@@ -37,8 +36,6 @@ use safekeeper_api::{
 };
 use std::borrow::Cow;
 use std::collections::{BTreeSet, HashMap};
-use std::fs::File;
-use std::os::fd::AsRawFd;
 use std::path::PathBuf;
 use std::process::exit;
 use std::str::FromStr;
@@ -692,21 +689,6 @@ struct TimelineTreeEl {
     pub children: BTreeSet<TimelineId>,
 }

-/// A flock-based guard over the neon_local repository directory
-struct RepoLock {
-    _file: File,
-}
-
-impl RepoLock {
-    fn new() -> Result<Self> {
-        let repo_dir = File::open(local_env::base_path())?;
-        let repo_dir_fd = repo_dir.as_raw_fd();
-        flock(repo_dir_fd, FlockArg::LockExclusive)?;
-
-        Ok(Self { _file: repo_dir })
-    }
-}
-
 // Main entry point for the 'neon_local' CLI utility
 //
 // This utility helps to manage neon installation. That includes following:
@@ -718,14 +700,9 @@ fn main() -> Result<()> {
     let cli = Cli::parse();

     // Check for 'neon init' command first.
-    let (subcommand_result, _lock) = if let NeonLocalCmd::Init(args) = cli.command {
-        (handle_init(&args).map(|env| Some(Cow::Owned(env))), None)
+    let subcommand_result = if let NeonLocalCmd::Init(args) = cli.command {
+        handle_init(&args).map(|env| Some(Cow::Owned(env)))
     } else {
-        // This tool uses a collection of simple files to store its state, and consequently
-        // it is not generally safe to run multiple commands concurrently. Rather than expect
-        // all callers to know this, use a lock file to protect against concurrent execution.
-        let _repo_lock = RepoLock::new().unwrap();
-
         // all other commands need an existing config
         let env = LocalEnv::load_config(&local_env::base_path()).context("Error loading config")?;
         let original_env = env.clone();
@@ -751,12 +728,11 @@ fn main() -> Result<()> {
             NeonLocalCmd::Mappings(subcmd) => handle_mappings(&subcmd, env),
         };

-        let subcommand_result = if &original_env != env {
+        if &original_env != env {
             subcommand_result.map(|()| Some(Cow::Borrowed(env)))
         } else {
             subcommand_result.map(|()| None)
-        };
-        (subcommand_result, Some(_repo_lock))
+        }
     };

     match subcommand_result {
@@ -946,7 +922,7 @@ fn handle_init(args: &InitCmdArgs) -> anyhow::Result<LocalEnv> {
     } else {
         // User (likely interactive) did not provide a description of the environment, give them the default
         NeonLocalInitConf {
-            control_plane_api: Some(DEFAULT_PAGESERVER_CONTROL_PLANE_API.parse().unwrap()),
+            control_plane_api: Some(Some(DEFAULT_PAGESERVER_CONTROL_PLANE_API.parse().unwrap())),
             broker: NeonBroker {
                 listen_addr: DEFAULT_BROKER_ADDR.parse().unwrap(),
             },
@@ -1742,15 +1718,18 @@ async fn handle_start_all_impl(
         broker::start_broker_process(env, &retry_timeout).await
     });

-    js.spawn(async move {
-        let storage_controller = StorageController::from_env(env);
-        storage_controller
-            .start(NeonStorageControllerStartArgs::with_default_instance_id(
-                retry_timeout,
-            ))
-            .await
-            .map_err(|e| e.context("start storage_controller"))
-    });
+    // Only start the storage controller if the pageserver is configured to need it
+    if env.control_plane_api.is_some() {
+        js.spawn(async move {
+            let storage_controller = StorageController::from_env(env);
+            storage_controller
+                .start(NeonStorageControllerStartArgs::with_default_instance_id(
+                    retry_timeout,
+                ))
+                .await
+                .map_err(|e| e.context("start storage_controller"))
+        });
+    }

     for ps_conf in &env.pageservers {
         js.spawn(async move {
@@ -1795,6 +1774,10 @@ async fn neon_start_status_check(
     const RETRY_INTERVAL: Duration = Duration::from_millis(100);
     const NOTICE_AFTER_RETRIES: Duration = Duration::from_secs(5);

+    if env.control_plane_api.is_none() {
+        return Ok(());
+    }
+
     let storcon = StorageController::from_env(env);

     let retries = retry_timeout.as_millis() / RETRY_INTERVAL.as_millis();
@@ -62,7 +62,7 @@ use crate::local_env::LocalEnv;
|
|||||||
use crate::postgresql_conf::PostgresConf;
|
use crate::postgresql_conf::PostgresConf;
|
||||||
use crate::storage_controller::StorageController;
|
use crate::storage_controller::StorageController;
|
||||||
|
|
||||||
use compute_api::responses::{ComputeStatus, ComputeStatusResponse};
|
use compute_api::responses::{ComputeState, ComputeStatus};
|
||||||
use compute_api::spec::{Cluster, ComputeFeature, ComputeMode, ComputeSpec};
|
use compute_api::spec::{Cluster, ComputeFeature, ComputeMode, ComputeSpec};
|
||||||
|
|
||||||
// contents of a endpoint.json file
|
// contents of a endpoint.json file
|
||||||
@@ -316,10 +316,6 @@ impl Endpoint {
|
|||||||
// and can cause errors like 'no unpinned buffers available', see
|
// and can cause errors like 'no unpinned buffers available', see
|
||||||
// <https://github.com/neondatabase/neon/issues/9956>
|
// <https://github.com/neondatabase/neon/issues/9956>
|
||||||
conf.append("shared_buffers", "1MB");
|
conf.append("shared_buffers", "1MB");
|
||||||
// Postgres defaults to effective_io_concurrency=1, which does not exercise the pageserver's
|
|
||||||
// batching logic. Set this to 2 so that we exercise the code a bit without letting
|
|
||||||
// individual tests do a lot of concurrent work on underpowered test machines
|
|
||||||
conf.append("effective_io_concurrency", "2");
|
|
||||||
conf.append("fsync", "off");
|
conf.append("fsync", "off");
|
||||||
conf.append("max_connections", "100");
|
conf.append("max_connections", "100");
|
||||||
conf.append("wal_level", "logical");
|
conf.append("wal_level", "logical");
|
||||||
@@ -585,7 +581,6 @@ impl Endpoint {
|
|||||||
features: self.features.clone(),
|
features: self.features.clone(),
|
||||||
swap_size_bytes: None,
|
swap_size_bytes: None,
|
||||||
disk_quota_bytes: None,
|
disk_quota_bytes: None,
|
||||||
disable_lfc_resizing: None,
|
|
||||||
cluster: Cluster {
|
cluster: Cluster {
|
||||||
cluster_id: None, // project ID: not used
|
cluster_id: None, // project ID: not used
|
||||||
name: None, // project name: not used
|
name: None, // project name: not used
|
||||||
@@ -739,7 +734,7 @@ impl Endpoint {
     }
 
     // Call the /status HTTP API
-    pub async fn get_status(&self) -> Result<ComputeStatusResponse> {
+    pub async fn get_status(&self) -> Result<ComputeState> {
         let client = reqwest::Client::new();
 
         let response = client
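A minimal sketch of a caller for the endpoint above, assuming a local compute_ctl listening on an arbitrary HTTP port; the field set mirrors the /status response types shown in this diff, while the helper name, port, and address are made up for illustration:

```rust
use anyhow::Result;
use serde::Deserialize;

// Field set mirrors the /status response in this diff; names here are for illustration only.
#[derive(Deserialize, Debug)]
struct StatusBody {
    status: String,
    last_active: Option<String>, // RFC 3339 timestamp, see rfc3339_serialize further down
    error: Option<String>,
}

// Hypothetical probe: assumes a tokio runtime and a compute_ctl listening on `http_port`.
async fn probe_status(http_port: u16) -> Result<StatusBody> {
    let url = format!("http://127.0.0.1:{http_port}/status");
    let body = reqwest::get(&url).await?.json::<StatusBody>().await?;
    Ok(body)
}
```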
@@ -815,7 +810,7 @@ impl Endpoint {
         }
 
         let client = reqwest::Client::builder()
-            .timeout(Duration::from_secs(120))
+            .timeout(Duration::from_secs(30))
             .build()
             .unwrap();
         let response = client
@@ -76,7 +76,7 @@ pub struct LocalEnv {
 
     // Control plane upcall API for pageserver: if None, we will not run storage_controller  If set, this will
    // be propagated into each pageserver's configuration.
-    pub control_plane_api: Url,
+    pub control_plane_api: Option<Url>,
 
     // Control plane upcall API for storage controller.  If set, this will be propagated into the
     // storage controller's configuration.
@@ -133,7 +133,7 @@ pub struct NeonLocalInitConf {
     pub storage_controller: Option<NeonStorageControllerConf>,
     pub pageservers: Vec<NeonLocalInitPageserverConf>,
     pub safekeepers: Vec<SafekeeperConf>,
-    pub control_plane_api: Option<Url>,
+    pub control_plane_api: Option<Option<Url>>,
     pub control_plane_compute_hook_api: Option<Option<Url>>,
 }
 
@@ -180,7 +180,7 @@ impl NeonStorageControllerConf {
     const DEFAULT_MAX_WARMING_UP_INTERVAL: std::time::Duration = std::time::Duration::from_secs(30);
 
     // Very tight heartbeat interval to speed up tests
-    const DEFAULT_HEARTBEAT_INTERVAL: std::time::Duration = std::time::Duration::from_millis(1000);
+    const DEFAULT_HEARTBEAT_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
 }
 
 impl Default for NeonStorageControllerConf {
@@ -535,7 +535,7 @@ impl LocalEnv {
             storage_controller,
             pageservers,
             safekeepers,
-            control_plane_api: control_plane_api.unwrap(),
+            control_plane_api,
             control_plane_compute_hook_api,
             branch_name_mappings,
         }
@@ -638,7 +638,7 @@ impl LocalEnv {
                 storage_controller: self.storage_controller.clone(),
                 pageservers: vec![], // it's skip_serializing anyway
                 safekeepers: self.safekeepers.clone(),
-                control_plane_api: Some(self.control_plane_api.clone()),
+                control_plane_api: self.control_plane_api.clone(),
                 control_plane_compute_hook_api: self.control_plane_compute_hook_api.clone(),
                 branch_name_mappings: self.branch_name_mappings.clone(),
             },
@@ -768,7 +768,7 @@ impl LocalEnv {
             storage_controller: storage_controller.unwrap_or_default(),
             pageservers: pageservers.iter().map(Into::into).collect(),
             safekeepers,
-            control_plane_api: control_plane_api.unwrap(),
+            control_plane_api: control_plane_api.unwrap_or_default(),
             control_plane_compute_hook_api: control_plane_compute_hook_api.unwrap_or_default(),
             branch_name_mappings: Default::default(),
         };
@@ -95,19 +95,21 @@ impl PageServerNode {
 
         let mut overrides = vec![pg_distrib_dir_param, broker_endpoint_param];
 
-        overrides.push(format!(
-            "control_plane_api='{}'",
-            self.env.control_plane_api.as_str()
-        ));
+        if let Some(control_plane_api) = &self.env.control_plane_api {
+            overrides.push(format!(
+                "control_plane_api='{}'",
+                control_plane_api.as_str()
+            ));
 
         // Storage controller uses the same auth as pageserver: if JWT is enabled
         // for us, we will also need it to talk to them.
         if matches!(conf.http_auth_type, AuthType::NeonJWT) {
             let jwt_token = self
                 .env
                 .generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
                 .unwrap();
             overrides.push(format!("control_plane_api_token='{}'", jwt_token));
+            }
         }
 
         if !conf.other.contains_key("remote_storage") {
@@ -433,7 +435,7 @@ impl PageServerNode {
     ) -> anyhow::Result<()> {
         let config = Self::parse_config(settings)?;
         self.http_client
-            .set_tenant_config(&models::TenantConfigRequest { tenant_id, config })
+            .tenant_config(&models::TenantConfigRequest { tenant_id, config })
             .await?;
 
         Ok(())
@@ -338,7 +338,7 @@ impl StorageController {
                     .port(),
             )
         } else {
-            let listen_url = self.env.control_plane_api.clone();
+            let listen_url = self.env.control_plane_api.clone().unwrap();
 
             let listen = format!(
                 "{}:{}",
@@ -708,7 +708,7 @@ impl StorageController {
        } else {
             // The configured URL has the /upcall path prefix for pageservers to use: we will strip that out
             // for general purpose API access.
-            let listen_url = self.env.control_plane_api.clone();
+            let listen_url = self.env.control_plane_api.clone().unwrap();
             Url::from_str(&format!(
                 "http://{}:{}/{path}",
                 listen_url.host_str().unwrap(),
@@ -5,13 +5,12 @@ use clap::{Parser, Subcommand};
 use pageserver_api::{
     controller_api::{
         AvailabilityZone, NodeAvailabilityWrapper, NodeDescribeResponse, NodeShardResponse,
-        SafekeeperDescribeResponse, ShardSchedulingPolicy, TenantCreateRequest,
-        TenantDescribeResponse, TenantPolicyRequest,
+        ShardSchedulingPolicy, TenantCreateRequest, TenantDescribeResponse, TenantPolicyRequest,
     },
     models::{
         EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary,
-        ShardParameters, TenantConfig, TenantConfigPatchRequest, TenantConfigRequest,
-        TenantShardSplitRequest, TenantShardSplitResponse,
+        ShardParameters, TenantConfig, TenantConfigRequest, TenantShardSplitRequest,
+        TenantShardSplitResponse,
     },
     shard::{ShardStripeSize, TenantShardId},
 };
@@ -117,19 +116,9 @@ enum Command {
         #[arg(long)]
         tenant_shard_id: TenantShardId,
     },
-    /// Set the pageserver tenant configuration of a tenant: this is the configuration structure
+    /// Modify the pageserver tenant configuration of a tenant: this is the configuration structure
     /// that is passed through to pageservers, and does not affect storage controller behavior.
-    /// Any previous tenant configs are overwritten.
-    SetTenantConfig {
-        #[arg(long)]
-        tenant_id: TenantId,
-        #[arg(long)]
-        config: String,
-    },
-    /// Patch the pageserver tenant configuration of a tenant. Any fields with null values in the
-    /// provided JSON are unset from the tenant config and all fields with non-null values are set.
-    /// Unspecified fields are not changed.
-    PatchTenantConfig {
+    TenantConfig {
         #[arg(long)]
         tenant_id: TenantId,
         #[arg(long)]
@@ -212,8 +201,6 @@ enum Command {
         #[arg(long)]
         timeout: humantime::Duration,
     },
-    /// List safekeepers known to the storage controller
-    Safekeepers {},
 }
 
 #[derive(Parser)]
@@ -562,21 +549,11 @@ async fn main() -> anyhow::Result<()> {
             )
             .await?;
         }
-        Command::SetTenantConfig { tenant_id, config } => {
+        Command::TenantConfig { tenant_id, config } => {
             let tenant_conf = serde_json::from_str(&config)?;
 
             vps_client
-                .set_tenant_config(&TenantConfigRequest {
-                    tenant_id,
-                    config: tenant_conf,
-                })
-                .await?;
-        }
-        Command::PatchTenantConfig { tenant_id, config } => {
-            let tenant_conf = serde_json::from_str(&config)?;
-
-            vps_client
-                .patch_tenant_config(&TenantConfigPatchRequest {
+                .tenant_config(&TenantConfigRequest {
                     tenant_id,
                     config: tenant_conf,
                 })
@@ -759,7 +736,7 @@ async fn main() -> anyhow::Result<()> {
             threshold,
         } => {
             vps_client
-                .set_tenant_config(&TenantConfigRequest {
+                .tenant_config(&TenantConfigRequest {
                     tenant_id,
                     config: TenantConfig {
                         eviction_policy: Some(EvictionPolicy::LayerAccessThreshold(
@@ -1023,40 +1000,6 @@ async fn main() -> anyhow::Result<()> {
                 "Fill was cancelled for node {node_id}. Schedulling policy is now {final_policy:?}"
             );
         }
-        Command::Safekeepers {} => {
-            let mut resp = storcon_client
-                .dispatch::<(), Vec<SafekeeperDescribeResponse>>(
-                    Method::GET,
-                    "control/v1/safekeeper".to_string(),
-                    None,
-                )
-                .await?;
-
-            resp.sort_by(|a, b| a.id.cmp(&b.id));
-
-            let mut table = comfy_table::Table::new();
-            table.set_header([
-                "Id",
-                "Version",
-                "Host",
-                "Port",
-                "Http Port",
-                "AZ Id",
-                "Scheduling",
-            ]);
-            for sk in resp {
-                table.add_row([
-                    format!("{}", sk.id),
-                    format!("{}", sk.version),
-                    sk.host,
-                    format!("{}", sk.port),
-                    format!("{}", sk.http_port),
-                    sk.availability_zone_id.clone(),
-                    String::from(sk.scheduling_policy),
-                ]);
-            }
-            println!("{table}");
-        }
     }
 
     Ok(())
@@ -42,7 +42,6 @@ allow = [
     "MPL-2.0",
     "OpenSSL",
     "Unicode-DFS-2016",
-    "Unicode-3.0",
 ]
 confidence-threshold = 0.8
 exceptions = [
@@ -132,6 +132,11 @@
                 "name": "cron.database",
                 "value": "postgres",
                 "vartype": "string"
+            },
+            {
+                "name": "session_preload_libraries",
+                "value": "anon",
+                "vartype": "string"
             }
         ]
     },
@@ -35,11 +35,11 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
     echo "clean up containers if exists"
     cleanup
     PG_TEST_VERSION=$((pg_version < 16 ? 16 : pg_version))
-    # The support of pg_anon not yet added to PG17, so we have to add the corresponding option for other PG versions
-    if [ "${pg_version}" -ne 17 ]; then
+    # The support of pg_anon not yet added to PG17, so we have to remove the corresponding option
+    if [ $pg_version -eq 17 ]; then
         SPEC_PATH="compute_wrapper/var/db/postgres/specs"
         mv $SPEC_PATH/spec.json $SPEC_PATH/spec.bak
-        jq '.cluster.settings += [{"name": "session_preload_libraries","value": "anon","vartype": "string"}]' "${SPEC_PATH}/spec.bak" > "${SPEC_PATH}/spec.json"
+        jq 'del(.cluster.settings[] | select (.name == "session_preload_libraries"))' $SPEC_PATH/spec.bak > $SPEC_PATH/spec.json
     fi
     PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d
 
@@ -106,8 +106,8 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
         fi
     fi
     cleanup
-    # Restore the original spec.json
-    if [ "$pg_version" -ne 17 ]; then
-        mv "$SPEC_PATH/spec.bak" "$SPEC_PATH/spec.json"
+    # The support of pg_anon not yet added to PG17, so we have to remove the corresponding option
+    if [ $pg_version -eq 17 ]; then
+        mv $SPEC_PATH/spec.bak $SPEC_PATH/spec.json
     fi
 done
@@ -1,5 +1,6 @@
 //! Structs representing the JSON formats used in the compute_ctl's HTTP API.
 
+use std::collections::HashSet;
 use std::fmt::Display;
 
 use chrono::{DateTime, Utc};
@@ -15,17 +16,6 @@ pub struct GenericAPIError {
     pub error: String,
 }
 
-#[derive(Debug, Clone, Serialize)]
-pub struct InfoResponse {
-    pub num_cpus: usize,
-}
-
-#[derive(Debug, Clone, Serialize)]
-pub struct ExtensionInstallResponse {
-    pub extension: PgIdent,
-    pub version: ExtVersion,
-}
-
 /// Response of the /status API
 #[derive(Serialize, Debug, Deserialize)]
 #[serde(rename_all = "snake_case")]
@@ -39,6 +29,16 @@ pub struct ComputeStatusResponse {
     pub error: Option<String>,
 }
 
+#[derive(Deserialize, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub struct ComputeState {
+    pub status: ComputeStatus,
+    /// Timestamp of the last Postgres activity
+    #[serde(serialize_with = "rfc3339_serialize")]
+    pub last_active: Option<DateTime<Utc>>,
+    pub error: Option<String>,
+}
+
 #[derive(Serialize, Clone, Copy, Debug, Deserialize, PartialEq, Eq)]
 #[serde(rename_all = "snake_case")]
 pub enum ComputeStatus {
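As a side note, a minimal sketch of how a `serialize_with` hook like `rfc3339_serialize` above is wired up in serde; the struct, field, and formatting choices here are made up for illustration and only mirror the shape of the code in this diff:

```rust
use chrono::{DateTime, SecondsFormat, Utc};
use serde::{Serialize, Serializer};

// Same shape as the helper above: format an optional timestamp as RFC 3339, or emit null.
fn rfc3339_serialize<S>(x: &Option<DateTime<Utc>>, s: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    match x {
        Some(ts) => s.serialize_str(&ts.to_rfc3339_opts(SecondsFormat::Millis, true)),
        None => s.serialize_none(),
    }
}

// Hypothetical struct using the hook, analogous to ComputeState above.
#[derive(Serialize)]
struct Heartbeat {
    #[serde(serialize_with = "rfc3339_serialize")]
    last_active: Option<DateTime<Utc>>,
}
```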
@@ -79,7 +79,7 @@ impl Display for ComputeStatus {
     }
 }
 
-pub fn rfc3339_serialize<S>(x: &Option<DateTime<Utc>>, s: S) -> Result<S::Ok, S::Error>
+fn rfc3339_serialize<S>(x: &Option<DateTime<Utc>>, s: S) -> Result<S::Ok, S::Error>
 where
     S: Serializer,
 {
@@ -163,9 +163,8 @@ pub enum ControlPlaneComputeStatus {
 #[derive(Clone, Debug, Default, Serialize)]
 pub struct InstalledExtension {
     pub extname: String,
-    pub version: String,
+    pub versions: HashSet<String>,
     pub n_databases: u32, // Number of databases using this extension
-    pub owned_by_superuser: String,
 }
 
 #[derive(Clone, Debug, Default, Serialize)]
@@ -67,15 +67,6 @@ pub struct ComputeSpec {
     #[serde(default)]
     pub disk_quota_bytes: Option<u64>,
 
-    /// Disables the vm-monitor behavior that resizes LFC on upscale/downscale, instead relying on
-    /// the initial size of LFC.
-    ///
-    /// This is intended for use when the LFC size is being overridden from the default but
-    /// autoscaling is still enabled, and we don't want the vm-monitor to interfere with the custom
-    /// LFC sizing.
-    #[serde(default)]
-    pub disable_lfc_resizing: Option<bool>,
-
     /// Expected cluster state at the end of transition process.
     pub cluster: Cluster,
     pub delta_operations: Option<Vec<DeltaOp>>,
@@ -91,7 +91,7 @@ impl Timing {
 
     /// Return true if there is a ready event.
     fn is_event_ready(&self, queue: &mut BinaryHeap<Pending>) -> bool {
-        queue.peek().is_some_and(|x| x.time <= self.now())
+        queue.peek().map_or(false, |x| x.time <= self.now())
     }
 
     /// Clear all pending events.
@@ -75,7 +75,7 @@ pub struct TenantPolicyRequest {
     pub scheduling: Option<ShardSchedulingPolicy>,
 }
 
-#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
+#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
 pub struct AvailabilityZone(pub String);
 
 impl Display for AvailabilityZone {
@@ -245,17 +245,6 @@ impl From<NodeAvailability> for NodeAvailabilityWrapper {
     }
 }
 
-/// Scheduling policy enables us to selectively disable some automatic actions that the
-/// controller performs on a tenant shard. This is only set to a non-default value by
-/// human intervention, and it is reset to the default value (Active) when the tenant's
-/// placement policy is modified away from Attached.
-///
-/// The typical use of a non-Active scheduling policy is one of:
-/// - Pinnning a shard to a node (i.e. migrating it there & setting a non-Active scheduling policy)
-/// - Working around a bug (e.g. if something is flapping and we need to stop it until the bug is fixed)
-///
-/// If you're not sure which policy to use to pin a shard to its current location, you probably
-/// want Pause.
 #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
 pub enum ShardSchedulingPolicy {
     // Normal mode: the tenant's scheduled locations may be updated at will, including
@@ -320,38 +309,6 @@ impl From<NodeSchedulingPolicy> for String {
     }
 }
 
-#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
-pub enum SkSchedulingPolicy {
-    Active,
-    Disabled,
-    Decomissioned,
-}
-
-impl FromStr for SkSchedulingPolicy {
-    type Err = anyhow::Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(match s {
-            "active" => Self::Active,
-            "disabled" => Self::Disabled,
-            "decomissioned" => Self::Decomissioned,
-            _ => return Err(anyhow::anyhow!("Unknown scheduling state '{s}'")),
-        })
-    }
-}
-
-impl From<SkSchedulingPolicy> for String {
-    fn from(value: SkSchedulingPolicy) -> String {
-        use SkSchedulingPolicy::*;
-        match value {
-            Active => "active",
-            Disabled => "disabled",
-            Decomissioned => "decomissioned",
-        }
-        .to_string()
-    }
-}
-
 /// Controls how tenant shards are mapped to locations on pageservers, e.g. whether
 /// to create secondary locations.
 #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
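For readers skimming the removed block above, a tiny sketch of the string round-trip it implements; this only illustrates the removed SkSchedulingPolicy API and is not code that exists on the other side of the diff:

```rust
use std::str::FromStr;

// Round-trips a policy string through the removed SkSchedulingPolicy API above.
fn demo() -> anyhow::Result<()> {
    let policy = SkSchedulingPolicy::from_str("disabled")?;
    assert!(matches!(policy, SkSchedulingPolicy::Disabled));
    assert_eq!(String::from(policy), "disabled");
    Ok(())
}
```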
@@ -404,24 +361,6 @@ pub struct MetadataHealthListOutdatedResponse {
     pub health_records: Vec<MetadataHealthRecord>,
 }
 
-/// Publicly exposed safekeeper description
-///
-/// The `active` flag which we have in the DB is not included on purpose: it is deprecated.
-#[derive(Serialize, Deserialize, Clone)]
-pub struct SafekeeperDescribeResponse {
-    pub id: NodeId,
-    pub region_id: String,
-    /// 1 is special, it means just created (not currently posted to storcon).
-    /// Zero or negative is not really expected.
-    /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
-    pub version: i64,
-    pub host: String,
-    pub port: i32,
-    pub http_port: i32,
-    pub availability_zone_id: String,
-    pub scheduling_policy: SkSchedulingPolicy,
-}
-
 #[cfg(test)]
 mod test {
     use super::*;
@@ -24,7 +24,7 @@ pub struct Key {
 
 /// When working with large numbers of Keys in-memory, it is more efficient to handle them as i128 than as
 /// a struct of fields.
-#[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, Debug)]
+#[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)]
 pub struct CompactKey(i128);
 
 /// The storage key size.
@@ -565,10 +565,6 @@ impl Key {
             && self.field5 == 0
             && self.field6 == u32::MAX
     }
-
-    pub fn is_slru_dir_key(&self) -> bool {
-        slru_dir_kind(self).is_some()
-    }
 }
 
 #[inline(always)]
@@ -6,7 +6,6 @@ pub mod utilization;
 use camino::Utf8PathBuf;
 pub use utilization::PageserverUtilization;
 
-use core::ops::Range;
 use std::{
     collections::HashMap,
     fmt::Display,
@@ -18,7 +17,7 @@ use std::{
 
 use byteorder::{BigEndian, ReadBytesExt};
 use postgres_ffi::BLCKSZ;
-use serde::{Deserialize, Deserializer, Serialize, Serializer};
+use serde::{Deserialize, Serialize};
 use serde_with::serde_as;
 use utils::{
     completion,
@@ -29,7 +28,6 @@ use utils::{
 };
 
 use crate::{
-    key::Key,
     reltag::RelTag,
     shard::{ShardCount, ShardStripeSize, TenantShardId},
 };
@@ -212,68 +210,6 @@ pub enum TimelineState {
     Broken { reason: String, backtrace: String },
 }
 
-#[serde_with::serde_as]
-#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
-pub struct CompactLsnRange {
-    pub start: Lsn,
-    pub end: Lsn,
-}
-
-#[serde_with::serde_as]
-#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
-pub struct CompactKeyRange {
-    #[serde_as(as = "serde_with::DisplayFromStr")]
-    pub start: Key,
-    #[serde_as(as = "serde_with::DisplayFromStr")]
-    pub end: Key,
-}
-
-impl From<Range<Lsn>> for CompactLsnRange {
-    fn from(range: Range<Lsn>) -> Self {
-        Self {
-            start: range.start,
-            end: range.end,
-        }
-    }
-}
-
-impl From<Range<Key>> for CompactKeyRange {
-    fn from(range: Range<Key>) -> Self {
-        Self {
-            start: range.start,
-            end: range.end,
-        }
-    }
-}
-
-impl From<CompactLsnRange> for Range<Lsn> {
-    fn from(range: CompactLsnRange) -> Self {
-        range.start..range.end
-    }
-}
-
-impl From<CompactKeyRange> for Range<Key> {
-    fn from(range: CompactKeyRange) -> Self {
-        range.start..range.end
-    }
-}
-
-impl CompactLsnRange {
-    pub fn above(lsn: Lsn) -> Self {
-        Self {
-            start: lsn,
-            end: Lsn::MAX,
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize)]
-pub struct CompactInfoResponse {
-    pub compact_key_range: Option<CompactKeyRange>,
-    pub compact_lsn_range: Option<CompactLsnRange>,
-    pub sub_compaction: bool,
-}
-
 #[derive(Serialize, Deserialize, Clone)]
 pub struct TimelineCreateRequest {
     pub new_timeline_id: TimelineId,
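A hedged sketch of the conversions defined in the removed block above; it only exercises that removed API (the `utils::lsn::Lsn` import path and the sample LSN values are assumptions):

```rust
use std::ops::Range;
use utils::lsn::Lsn;

// Illustrates the From impls and the `above` constructor from the removed CompactLsnRange.
fn demo() {
    let lsn_range: Range<Lsn> = Lsn(0x10)..Lsn(0x40);
    let compact: CompactLsnRange = lsn_range.into(); // From<Range<Lsn>>
    let back: Range<Lsn> = compact.into();           // From<CompactLsnRange>
    assert_eq!(back, Lsn(0x10)..Lsn(0x40));

    // `above` pins the start and leaves the end open-ended at Lsn::MAX.
    let open_ended = CompactLsnRange::above(Lsn(0x40));
    assert_eq!(Range::<Lsn>::from(open_ended), Lsn(0x40)..Lsn::MAX);
}
```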
@@ -389,115 +325,6 @@ impl Default for ShardParameters {
     }
 }
 
-#[derive(Debug, Default, Clone, Eq, PartialEq)]
-pub enum FieldPatch<T> {
-    Upsert(T),
-    Remove,
-    #[default]
-    Noop,
-}
-
-impl<T> FieldPatch<T> {
-    fn is_noop(&self) -> bool {
-        matches!(self, FieldPatch::Noop)
-    }
-
-    pub fn apply(self, target: &mut Option<T>) {
-        match self {
-            Self::Upsert(v) => *target = Some(v),
-            Self::Remove => *target = None,
-            Self::Noop => {}
-        }
-    }
-
-    pub fn map<U, E, F: FnOnce(T) -> Result<U, E>>(self, map: F) -> Result<FieldPatch<U>, E> {
-        match self {
-            Self::Upsert(v) => Ok(FieldPatch::<U>::Upsert(map(v)?)),
-            Self::Remove => Ok(FieldPatch::<U>::Remove),
-            Self::Noop => Ok(FieldPatch::<U>::Noop),
-        }
-    }
-}
-
-impl<'de, T: Deserialize<'de>> Deserialize<'de> for FieldPatch<T> {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        Option::deserialize(deserializer).map(|opt| match opt {
-            None => FieldPatch::Remove,
-            Some(val) => FieldPatch::Upsert(val),
-        })
-    }
-}
-
-impl<T: Serialize> Serialize for FieldPatch<T> {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        match self {
-            FieldPatch::Upsert(val) => serializer.serialize_some(val),
-            FieldPatch::Remove => serializer.serialize_none(),
-            FieldPatch::Noop => unreachable!(),
-        }
-    }
-}
-
-#[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
-#[serde(default)]
-pub struct TenantConfigPatch {
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub checkpoint_distance: FieldPatch<u64>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub checkpoint_timeout: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub compaction_target_size: FieldPatch<u64>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub compaction_period: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub compaction_threshold: FieldPatch<usize>,
-    // defer parsing compaction_algorithm, like eviction_policy
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub compaction_algorithm: FieldPatch<CompactionAlgorithmSettings>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub gc_horizon: FieldPatch<u64>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub gc_period: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub image_creation_threshold: FieldPatch<usize>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub pitr_interval: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub walreceiver_connect_timeout: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub lagging_wal_timeout: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub max_lsn_wal_lag: FieldPatch<NonZeroU64>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub eviction_policy: FieldPatch<EvictionPolicy>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub min_resident_size_override: FieldPatch<u64>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub evictions_low_residence_duration_metric_threshold: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub heatmap_period: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub lazy_slru_download: FieldPatch<bool>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub timeline_get_throttle: FieldPatch<ThrottleConfig>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub image_layer_creation_check_threshold: FieldPatch<u8>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub lsn_lease_length: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub lsn_lease_length_for_ts: FieldPatch<String>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub timeline_offloading: FieldPatch<bool>,
-    #[serde(skip_serializing_if = "FieldPatch::is_noop")]
-    pub wal_receiver_protocol_override: FieldPatch<PostgresClientProtocol>,
-}
-
 /// An alternative representation of `pageserver::tenant::TenantConf` with
 /// simpler types.
 #[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
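A short sketch of the patch semantics encoded by the removed FieldPatch type above: an absent field deserializes to Noop (via `#[serde(default)]` plus `skip_serializing_if`), an explicit null to Remove, and a concrete value to Upsert. The field choice below is arbitrary:

```rust
// Illustrates the removed FieldPatch/TenantConfigPatch deserialization behaviour.
fn demo() -> anyhow::Result<()> {
    let patch: TenantConfigPatch =
        serde_json::from_str(r#"{ "gc_horizon": 1048576, "compaction_period": null }"#)?;
    assert_eq!(patch.gc_horizon, FieldPatch::Upsert(1048576));
    assert_eq!(patch.compaction_period, FieldPatch::Remove);
    assert_eq!(patch.checkpoint_distance, FieldPatch::Noop); // absent ⇒ untouched
    Ok(())
}
```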
@@ -529,107 +356,6 @@ pub struct TenantConfig {
     pub wal_receiver_protocol_override: Option<PostgresClientProtocol>,
 }
 
-impl TenantConfig {
-    pub fn apply_patch(self, patch: TenantConfigPatch) -> TenantConfig {
-        let Self {
-            mut checkpoint_distance,
-            mut checkpoint_timeout,
-            mut compaction_target_size,
-            mut compaction_period,
-            mut compaction_threshold,
-            mut compaction_algorithm,
-            mut gc_horizon,
-            mut gc_period,
-            mut image_creation_threshold,
-            mut pitr_interval,
-            mut walreceiver_connect_timeout,
-            mut lagging_wal_timeout,
-            mut max_lsn_wal_lag,
-            mut eviction_policy,
-            mut min_resident_size_override,
-            mut evictions_low_residence_duration_metric_threshold,
-            mut heatmap_period,
-            mut lazy_slru_download,
-            mut timeline_get_throttle,
-            mut image_layer_creation_check_threshold,
-            mut lsn_lease_length,
-            mut lsn_lease_length_for_ts,
-            mut timeline_offloading,
-            mut wal_receiver_protocol_override,
-        } = self;
-
-        patch.checkpoint_distance.apply(&mut checkpoint_distance);
-        patch.checkpoint_timeout.apply(&mut checkpoint_timeout);
-        patch
-            .compaction_target_size
-            .apply(&mut compaction_target_size);
-        patch.compaction_period.apply(&mut compaction_period);
-        patch.compaction_threshold.apply(&mut compaction_threshold);
-        patch.compaction_algorithm.apply(&mut compaction_algorithm);
-        patch.gc_horizon.apply(&mut gc_horizon);
-        patch.gc_period.apply(&mut gc_period);
-        patch
-            .image_creation_threshold
-            .apply(&mut image_creation_threshold);
-        patch.pitr_interval.apply(&mut pitr_interval);
-        patch
-            .walreceiver_connect_timeout
-            .apply(&mut walreceiver_connect_timeout);
-        patch.lagging_wal_timeout.apply(&mut lagging_wal_timeout);
-        patch.max_lsn_wal_lag.apply(&mut max_lsn_wal_lag);
-        patch.eviction_policy.apply(&mut eviction_policy);
-        patch
-            .min_resident_size_override
-            .apply(&mut min_resident_size_override);
-        patch
-            .evictions_low_residence_duration_metric_threshold
-            .apply(&mut evictions_low_residence_duration_metric_threshold);
-        patch.heatmap_period.apply(&mut heatmap_period);
-        patch.lazy_slru_download.apply(&mut lazy_slru_download);
-        patch
-            .timeline_get_throttle
-            .apply(&mut timeline_get_throttle);
-        patch
-            .image_layer_creation_check_threshold
-            .apply(&mut image_layer_creation_check_threshold);
-        patch.lsn_lease_length.apply(&mut lsn_lease_length);
-        patch
-            .lsn_lease_length_for_ts
-            .apply(&mut lsn_lease_length_for_ts);
-        patch.timeline_offloading.apply(&mut timeline_offloading);
-        patch
-            .wal_receiver_protocol_override
-            .apply(&mut wal_receiver_protocol_override);
-
-        Self {
-            checkpoint_distance,
-            checkpoint_timeout,
-            compaction_target_size,
-            compaction_period,
-            compaction_threshold,
-            compaction_algorithm,
-            gc_horizon,
-            gc_period,
-            image_creation_threshold,
-            pitr_interval,
-            walreceiver_connect_timeout,
-            lagging_wal_timeout,
-            max_lsn_wal_lag,
-            eviction_policy,
-            min_resident_size_override,
-            evictions_low_residence_duration_metric_threshold,
-            heatmap_period,
-            lazy_slru_download,
-            timeline_get_throttle,
-            image_layer_creation_check_threshold,
-            lsn_lease_length,
-            lsn_lease_length_for_ts,
-            timeline_offloading,
-            wal_receiver_protocol_override,
-        }
-    }
-}
-
 /// The policy for the aux file storage.
 ///
 /// It can be switched through `switch_aux_file_policy` tenant config.
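A minimal usage sketch of how the removed `apply_patch` above composes with `TenantConfigPatch`; the concrete values are illustrative only:

```rust
// Builds a patch that overrides one knob, unsets another, and leaves the rest untouched.
fn demo(current: TenantConfig) -> TenantConfig {
    let patch = TenantConfigPatch {
        gc_horizon: FieldPatch::Upsert(64 * 1024 * 1024),
        pitr_interval: FieldPatch::Remove, // fall back to the pageserver default
        ..Default::default()               // everything else stays Noop
    };
    current.apply_patch(patch)
}
```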
@@ -960,14 +686,6 @@ impl TenantConfigRequest {
     }
 }
 
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(deny_unknown_fields)]
-pub struct TenantConfigPatchRequest {
-    pub tenant_id: TenantId,
-    #[serde(flatten)]
-    pub config: TenantConfigPatch, // as we have a flattened field, we should reject all unknown fields in it
-}
-
 /// See [`TenantState::attachment_status`] and the OpenAPI docs for context.
 #[derive(Serialize, Deserialize, Clone)]
 #[serde(tag = "slug", content = "data", rename_all = "snake_case")]
@@ -1460,91 +1178,75 @@ impl TryFrom<u8> for PagestreamBeMessageTag {
 // interface allows sending both LSNs, and let the pageserver do the right thing. There was no
 // difference in the responses between V1 and V2.
 //
-// V3 version of protocol adds request ID to all requests. This request ID is also included in response
-// as well as other fields from requests, which allows to verify that we receive response for our request.
-// We copy fields from request to response to make checking more reliable: request ID is formed from process ID
-// and local counter, so in principle there can be duplicated requests IDs if process PID is reused.
-//
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+#[derive(Clone, Copy)]
 pub enum PagestreamProtocolVersion {
     V2,
-    V3,
 }
 
-pub type RequestId = u64;
-
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
-pub struct PagestreamRequest {
-    pub reqid: RequestId,
+#[derive(Debug, PartialEq, Eq)]
+pub struct PagestreamExistsRequest {
     pub request_lsn: Lsn,
     pub not_modified_since: Lsn,
-}
-
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
-pub struct PagestreamExistsRequest {
-    pub hdr: PagestreamRequest,
     pub rel: RelTag,
 }
 
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct PagestreamNblocksRequest {
-    pub hdr: PagestreamRequest,
+    pub request_lsn: Lsn,
+    pub not_modified_since: Lsn,
     pub rel: RelTag,
 }
 
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct PagestreamGetPageRequest {
-    pub hdr: PagestreamRequest,
+    pub request_lsn: Lsn,
+    pub not_modified_since: Lsn,
     pub rel: RelTag,
     pub blkno: u32,
 }
 
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct PagestreamDbSizeRequest {
-    pub hdr: PagestreamRequest,
+    pub request_lsn: Lsn,
+    pub not_modified_since: Lsn,
     pub dbnode: u32,
 }
 
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct PagestreamGetSlruSegmentRequest {
-    pub hdr: PagestreamRequest,
+    pub request_lsn: Lsn,
+    pub not_modified_since: Lsn,
     pub kind: u8,
     pub segno: u32,
 }
 
 #[derive(Debug)]
 pub struct PagestreamExistsResponse {
-    pub req: PagestreamExistsRequest,
     pub exists: bool,
 }
 
 #[derive(Debug)]
 pub struct PagestreamNblocksResponse {
-    pub req: PagestreamNblocksRequest,
     pub n_blocks: u32,
 }
 
 #[derive(Debug)]
 pub struct PagestreamGetPageResponse {
-    pub req: PagestreamGetPageRequest,
     pub page: Bytes,
 }
 
 #[derive(Debug)]
 pub struct PagestreamGetSlruSegmentResponse {
-    pub req: PagestreamGetSlruSegmentRequest,
     pub segment: Bytes,
 }
 
 #[derive(Debug)]
 pub struct PagestreamErrorResponse {
-    pub req: PagestreamRequest,
     pub message: String,
 }
 
 #[derive(Debug)]
 pub struct PagestreamDbSizeResponse {
-    pub req: PagestreamDbSizeRequest,
     pub db_size: i64,
 }
 
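A hedged sketch of how the release-74 side of this hunk groups the shared V3 fields into one header struct; the RelTag numbers and LSNs below are arbitrary sample values:

```rust
use utils::lsn::Lsn;

// Builds a V3-style GetPage request with the shared PagestreamRequest header.
fn demo() -> PagestreamGetPageRequest {
    PagestreamGetPageRequest {
        hdr: PagestreamRequest {
            reqid: 42, // formed from the process ID plus a local counter in practice
            request_lsn: Lsn(0x1000),
            not_modified_since: Lsn(0x0800),
        },
        rel: RelTag {
            spcnode: 1663,
            dbnode: 16384,
            relnode: 16385,
            forknum: 0,
        },
        blkno: 7,
    }
}
```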
@@ -1561,16 +1263,15 @@ pub struct TenantHistorySize {
 
 impl PagestreamFeMessage {
     /// Serialize a compute -> pageserver message. This is currently only used in testing
-    /// tools. Always uses protocol version 3.
+    /// tools. Always uses protocol version 2.
     pub fn serialize(&self) -> Bytes {
         let mut bytes = BytesMut::new();
 
         match self {
             Self::Exists(req) => {
                 bytes.put_u8(0);
-                bytes.put_u64(req.hdr.reqid);
-                bytes.put_u64(req.hdr.request_lsn.0);
-                bytes.put_u64(req.hdr.not_modified_since.0);
+                bytes.put_u64(req.request_lsn.0);
+                bytes.put_u64(req.not_modified_since.0);
                 bytes.put_u32(req.rel.spcnode);
                 bytes.put_u32(req.rel.dbnode);
                 bytes.put_u32(req.rel.relnode);
@@ -1579,9 +1280,8 @@ impl PagestreamFeMessage {
 
             Self::Nblocks(req) => {
                 bytes.put_u8(1);
-                bytes.put_u64(req.hdr.reqid);
-                bytes.put_u64(req.hdr.request_lsn.0);
-                bytes.put_u64(req.hdr.not_modified_since.0);
+                bytes.put_u64(req.request_lsn.0);
+                bytes.put_u64(req.not_modified_since.0);
                 bytes.put_u32(req.rel.spcnode);
                 bytes.put_u32(req.rel.dbnode);
                 bytes.put_u32(req.rel.relnode);
@@ -1590,9 +1290,8 @@ impl PagestreamFeMessage {
 
             Self::GetPage(req) => {
                 bytes.put_u8(2);
-                bytes.put_u64(req.hdr.reqid);
-                bytes.put_u64(req.hdr.request_lsn.0);
-                bytes.put_u64(req.hdr.not_modified_since.0);
+                bytes.put_u64(req.request_lsn.0);
+                bytes.put_u64(req.not_modified_since.0);
                 bytes.put_u32(req.rel.spcnode);
                 bytes.put_u32(req.rel.dbnode);
                 bytes.put_u32(req.rel.relnode);
@@ -1602,17 +1301,15 @@ impl PagestreamFeMessage {
 
             Self::DbSize(req) => {
                 bytes.put_u8(3);
-                bytes.put_u64(req.hdr.reqid);
-                bytes.put_u64(req.hdr.request_lsn.0);
-                bytes.put_u64(req.hdr.not_modified_since.0);
+                bytes.put_u64(req.request_lsn.0);
+                bytes.put_u64(req.not_modified_since.0);
                 bytes.put_u32(req.dbnode);
             }
 
             Self::GetSlruSegment(req) => {
                 bytes.put_u8(4);
-                bytes.put_u64(req.hdr.reqid);
-                bytes.put_u64(req.hdr.request_lsn.0);
-                bytes.put_u64(req.hdr.not_modified_since.0);
+                bytes.put_u64(req.request_lsn.0);
+                bytes.put_u64(req.not_modified_since.0);
                 bytes.put_u8(req.kind);
                 bytes.put_u32(req.segno);
             }
@@ -1621,35 +1318,21 @@ impl PagestreamFeMessage {
         bytes.into()
     }
 
-    pub fn parse<R: std::io::Read>(
-        body: &mut R,
-        protocol_version: PagestreamProtocolVersion,
-    ) -> anyhow::Result<PagestreamFeMessage> {
+    pub fn parse<R: std::io::Read>(body: &mut R) -> anyhow::Result<PagestreamFeMessage> {
         // these correspond to the NeonMessageTag enum in pagestore_client.h
         //
         // TODO: consider using protobuf or serde bincode for less error prone
         // serialization.
         let msg_tag = body.read_u8()?;
-        let (reqid, request_lsn, not_modified_since) = match protocol_version {
-            PagestreamProtocolVersion::V2 => (
-                0,
-                Lsn::from(body.read_u64::<BigEndian>()?),
-                Lsn::from(body.read_u64::<BigEndian>()?),
-            ),
-            PagestreamProtocolVersion::V3 => (
-                body.read_u64::<BigEndian>()?,
-                Lsn::from(body.read_u64::<BigEndian>()?),
-                Lsn::from(body.read_u64::<BigEndian>()?),
-            ),
-        };
+
+        // these two fields are the same for every request type
+        let request_lsn = Lsn::from(body.read_u64::<BigEndian>()?);
+        let not_modified_since = Lsn::from(body.read_u64::<BigEndian>()?);
 
         match msg_tag {
             0 => Ok(PagestreamFeMessage::Exists(PagestreamExistsRequest {
-                hdr: PagestreamRequest {
-                    reqid,
-                    request_lsn,
-                    not_modified_since,
-                },
+                request_lsn,
+                not_modified_since,
                 rel: RelTag {
                     spcnode: body.read_u32::<BigEndian>()?,
                     dbnode: body.read_u32::<BigEndian>()?,
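A hedged round-trip sketch for the release-74 framing shown above: `serialize()` always emits the V3 layout there, and `parse()` is told which version to expect. The helper name is made up; `std::io::Cursor` works as the reader because `Bytes` dereferences to a byte slice:

```rust
use bytes::Bytes;

// Serializes a request and parses it back, assuming the V3 signatures on the release-74 side.
fn roundtrip(req: PagestreamFeMessage) -> anyhow::Result<PagestreamFeMessage> {
    let wire: Bytes = req.serialize();
    let mut cursor = std::io::Cursor::new(wire);
    PagestreamFeMessage::parse(&mut cursor, PagestreamProtocolVersion::V3)
}
```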
@@ -1658,11 +1341,8 @@ impl PagestreamFeMessage {
                 },
             })),
             1 => Ok(PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
-                hdr: PagestreamRequest {
-                    reqid,
-                    request_lsn,
-                    not_modified_since,
-                },
+                request_lsn,
+                not_modified_since,
                 rel: RelTag {
                     spcnode: body.read_u32::<BigEndian>()?,
                     dbnode: body.read_u32::<BigEndian>()?,
@@ -1671,11 +1351,8 @@ impl PagestreamFeMessage {
                 },
             })),
             2 => Ok(PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
-                hdr: PagestreamRequest {
-                    reqid,
-                    request_lsn,
-                    not_modified_since,
-                },
+                request_lsn,
+                not_modified_since,
                 rel: RelTag {
                     spcnode: body.read_u32::<BigEndian>()?,
                     dbnode: body.read_u32::<BigEndian>()?,
@@ -1685,20 +1362,14 @@ impl PagestreamFeMessage {
                 blkno: body.read_u32::<BigEndian>()?,
             })),
             3 => Ok(PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
-                hdr: PagestreamRequest {
-                    reqid,
-                    request_lsn,
-                    not_modified_since,
-                },
+                request_lsn,
+                not_modified_since,
                 dbnode: body.read_u32::<BigEndian>()?,
             })),
             4 => Ok(PagestreamFeMessage::GetSlruSegment(
                 PagestreamGetSlruSegmentRequest {
-                    hdr: PagestreamRequest {
-                        reqid,
-                        request_lsn,
-                        not_modified_since,
-                    },
+                    request_lsn,
+                    not_modified_since,
                     kind: body.read_u8()?,
                     segno: body.read_u32::<BigEndian>()?,
                 },
@@ -1709,114 +1380,43 @@ impl PagestreamFeMessage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl PagestreamBeMessage {
|
impl PagestreamBeMessage {
|
||||||
pub fn serialize(&self, protocol_version: PagestreamProtocolVersion) -> Bytes {
|
pub fn serialize(&self) -> Bytes {
|
||||||
let mut bytes = BytesMut::new();
|
let mut bytes = BytesMut::new();
|
||||||
|
|
||||||
use PagestreamBeMessageTag as Tag;
|
use PagestreamBeMessageTag as Tag;
|
||||||
match protocol_version {
|
match self {
|
||||||
PagestreamProtocolVersion::V2 => {
|
Self::Exists(resp) => {
|
||||||
match self {
|
bytes.put_u8(Tag::Exists as u8);
|
||||||
Self::Exists(resp) => {
|
bytes.put_u8(resp.exists as u8);
|
||||||
bytes.put_u8(Tag::Exists as u8);
|
|
||||||
bytes.put_u8(resp.exists as u8);
|
|
||||||
}
|
|
||||||
|
|
||||||
Self::Nblocks(resp) => {
|
|
||||||
bytes.put_u8(Tag::Nblocks as u8);
|
|
||||||
bytes.put_u32(resp.n_blocks);
|
|
||||||
}
|
|
||||||
|
|
||||||
Self::GetPage(resp) => {
|
|
||||||
-                bytes.put_u8(Tag::GetPage as u8);
-                bytes.put(&resp.page[..])
-            }
-
-            Self::Error(resp) => {
-                bytes.put_u8(Tag::Error as u8);
-                bytes.put(resp.message.as_bytes());
-                bytes.put_u8(0); // null terminator
-            }
-            Self::DbSize(resp) => {
-                bytes.put_u8(Tag::DbSize as u8);
-                bytes.put_i64(resp.db_size);
-            }
-
-            Self::GetSlruSegment(resp) => {
-                bytes.put_u8(Tag::GetSlruSegment as u8);
-                bytes.put_u32((resp.segment.len() / BLCKSZ as usize) as u32);
-                bytes.put(&resp.segment[..]);
-            }
-        }
         }
-        PagestreamProtocolVersion::V3 => {
-            match self {
-                Self::Exists(resp) => {
-                    bytes.put_u8(Tag::Exists as u8);
-                    bytes.put_u64(resp.req.hdr.reqid);
-                    bytes.put_u64(resp.req.hdr.request_lsn.0);
-                    bytes.put_u64(resp.req.hdr.not_modified_since.0);
-                    bytes.put_u32(resp.req.rel.spcnode);
-                    bytes.put_u32(resp.req.rel.dbnode);
-                    bytes.put_u32(resp.req.rel.relnode);
-                    bytes.put_u8(resp.req.rel.forknum);
-                    bytes.put_u8(resp.exists as u8);
-                }

         Self::Nblocks(resp) => {
             bytes.put_u8(Tag::Nblocks as u8);
-            bytes.put_u64(resp.req.hdr.reqid);
-            bytes.put_u64(resp.req.hdr.request_lsn.0);
-            bytes.put_u64(resp.req.hdr.not_modified_since.0);
-            bytes.put_u32(resp.req.rel.spcnode);
-            bytes.put_u32(resp.req.rel.dbnode);
-            bytes.put_u32(resp.req.rel.relnode);
-            bytes.put_u8(resp.req.rel.forknum);
             bytes.put_u32(resp.n_blocks);
         }

         Self::GetPage(resp) => {
             bytes.put_u8(Tag::GetPage as u8);
-            bytes.put_u64(resp.req.hdr.reqid);
-            bytes.put_u64(resp.req.hdr.request_lsn.0);
-            bytes.put_u64(resp.req.hdr.not_modified_since.0);
-            bytes.put_u32(resp.req.rel.spcnode);
-            bytes.put_u32(resp.req.rel.dbnode);
-            bytes.put_u32(resp.req.rel.relnode);
-            bytes.put_u8(resp.req.rel.forknum);
-            bytes.put_u32(resp.req.blkno);
-            bytes.put(&resp.page[..])
+            bytes.put(&resp.page[..]);
         }

         Self::Error(resp) => {
             bytes.put_u8(Tag::Error as u8);
-            bytes.put_u64(resp.req.reqid);
-            bytes.put_u64(resp.req.request_lsn.0);
-            bytes.put_u64(resp.req.not_modified_since.0);
             bytes.put(resp.message.as_bytes());
             bytes.put_u8(0); // null terminator
         }
         Self::DbSize(resp) => {
             bytes.put_u8(Tag::DbSize as u8);
-            bytes.put_u64(resp.req.hdr.reqid);
-            bytes.put_u64(resp.req.hdr.request_lsn.0);
-            bytes.put_u64(resp.req.hdr.not_modified_since.0);
-            bytes.put_u32(resp.req.dbnode);
             bytes.put_i64(resp.db_size);
         }

         Self::GetSlruSegment(resp) => {
             bytes.put_u8(Tag::GetSlruSegment as u8);
-            bytes.put_u64(resp.req.hdr.reqid);
-            bytes.put_u64(resp.req.hdr.request_lsn.0);
-            bytes.put_u64(resp.req.hdr.not_modified_since.0);
-            bytes.put_u8(resp.req.kind);
-            bytes.put_u32(resp.req.segno);
             bytes.put_u32((resp.segment.len() / BLCKSZ as usize) as u32);
             bytes.put(&resp.segment[..]);
         }
-            }
-        }
     }

     bytes.into()
 }
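For orientation (not part of the diff): the arms removed above echo the whole request header (reqid, request_lsn, not_modified_since) and the request's rel/block fields in front of each response payload, while the surviving arms write only the tag and the payload. Below is a minimal, self-contained sketch of that framing difference, assuming only the bytes crate; the tag constant and the plain-integer arguments are placeholders for the real Tag, Lsn, and RelTag types.

// Standalone sketch of the two framings shown above; TAG_GET_PAGE and the
// integer arguments are illustrative stand-ins, not the real protocol values.
use bytes::{BufMut, Bytes, BytesMut};

const TAG_GET_PAGE: u8 = 0; // placeholder, not the real Tag discriminant

// Old-style body without the request echo: tag + page image only.
fn encode_getpage_without_header(page: &[u8]) -> Bytes {
    let mut b = BytesMut::new();
    b.put_u8(TAG_GET_PAGE);
    b.put(page);
    b.into()
}

// Header-echoing body: request id, LSNs, rel, and block number precede the page.
#[allow(clippy::too_many_arguments)]
fn encode_getpage_with_header(
    reqid: u64,
    request_lsn: u64,
    not_modified_since: u64,
    spcnode: u32,
    dbnode: u32,
    relnode: u32,
    forknum: u8,
    blkno: u32,
    page: &[u8],
) -> Bytes {
    let mut b = BytesMut::new();
    b.put_u8(TAG_GET_PAGE);
    b.put_u64(reqid);
    b.put_u64(request_lsn);
    b.put_u64(not_modified_since);
    b.put_u32(spcnode);
    b.put_u32(dbnode);
    b.put_u32(relnode);
    b.put_u8(forknum);
    b.put_u32(blkno);
    b.put(page);
    b.into()
}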
@@ -1828,131 +1428,38 @@ impl PagestreamBeMessage {
         let ok =
             match Tag::try_from(msg_tag).map_err(|tag: u8| anyhow::anyhow!("invalid tag {tag}"))? {
                 Tag::Exists => {
-                    let reqid = buf.read_u64::<BigEndian>()?;
-                    let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
-                    let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
-                    let rel = RelTag {
-                        spcnode: buf.read_u32::<BigEndian>()?,
-                        dbnode: buf.read_u32::<BigEndian>()?,
-                        relnode: buf.read_u32::<BigEndian>()?,
-                        forknum: buf.read_u8()?,
-                    };
-                    let exists = buf.read_u8()? != 0;
+                    let exists = buf.read_u8()?;
                     Self::Exists(PagestreamExistsResponse {
-                        req: PagestreamExistsRequest {
-                            hdr: PagestreamRequest {
-                                reqid,
-                                request_lsn,
-                                not_modified_since,
-                            },
-                            rel,
-                        },
-                        exists,
+                        exists: exists != 0,
                     })
                 }
                 Tag::Nblocks => {
-                    let reqid = buf.read_u64::<BigEndian>()?;
-                    let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
-                    let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
-                    let rel = RelTag {
-                        spcnode: buf.read_u32::<BigEndian>()?,
-                        dbnode: buf.read_u32::<BigEndian>()?,
-                        relnode: buf.read_u32::<BigEndian>()?,
-                        forknum: buf.read_u8()?,
-                    };
                     let n_blocks = buf.read_u32::<BigEndian>()?;
-                    Self::Nblocks(PagestreamNblocksResponse {
-                        req: PagestreamNblocksRequest {
-                            hdr: PagestreamRequest {
-                                reqid,
-                                request_lsn,
-                                not_modified_since,
-                            },
-                            rel,
-                        },
-                        n_blocks,
-                    })
+                    Self::Nblocks(PagestreamNblocksResponse { n_blocks })
                 }
                 Tag::GetPage => {
-                    let reqid = buf.read_u64::<BigEndian>()?;
-                    let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
-                    let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
-                    let rel = RelTag {
-                        spcnode: buf.read_u32::<BigEndian>()?,
-                        dbnode: buf.read_u32::<BigEndian>()?,
-                        relnode: buf.read_u32::<BigEndian>()?,
-                        forknum: buf.read_u8()?,
-                    };
-                    let blkno = buf.read_u32::<BigEndian>()?;
                     let mut page = vec![0; 8192]; // TODO: use MaybeUninit
                     buf.read_exact(&mut page)?;
-                    Self::GetPage(PagestreamGetPageResponse {
-                        req: PagestreamGetPageRequest {
-                            hdr: PagestreamRequest {
-                                reqid,
-                                request_lsn,
-                                not_modified_since,
-                            },
-                            rel,
-                            blkno,
-                        },
-                        page: page.into(),
-                    })
+                    PagestreamBeMessage::GetPage(PagestreamGetPageResponse { page: page.into() })
                 }
                 Tag::Error => {
-                    let reqid = buf.read_u64::<BigEndian>()?;
-                    let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
-                    let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
                     let mut msg = Vec::new();
                     buf.read_until(0, &mut msg)?;
                     let cstring = std::ffi::CString::from_vec_with_nul(msg)?;
                     let rust_str = cstring.to_str()?;
-                    Self::Error(PagestreamErrorResponse {
-                        req: PagestreamRequest {
-                            reqid,
-                            request_lsn,
-                            not_modified_since,
-                        },
+                    PagestreamBeMessage::Error(PagestreamErrorResponse {
                         message: rust_str.to_owned(),
                     })
                 }
                 Tag::DbSize => {
-                    let reqid = buf.read_u64::<BigEndian>()?;
-                    let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
-                    let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
-                    let dbnode = buf.read_u32::<BigEndian>()?;
                     let db_size = buf.read_i64::<BigEndian>()?;
-                    Self::DbSize(PagestreamDbSizeResponse {
-                        req: PagestreamDbSizeRequest {
-                            hdr: PagestreamRequest {
-                                reqid,
-                                request_lsn,
-                                not_modified_since,
-                            },
-                            dbnode,
-                        },
-                        db_size,
-                    })
+                    Self::DbSize(PagestreamDbSizeResponse { db_size })
                 }
                 Tag::GetSlruSegment => {
-                    let reqid = buf.read_u64::<BigEndian>()?;
-                    let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
-                    let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
-                    let kind = buf.read_u8()?;
-                    let segno = buf.read_u32::<BigEndian>()?;
                     let n_blocks = buf.read_u32::<BigEndian>()?;
                     let mut segment = vec![0; n_blocks as usize * BLCKSZ as usize];
                     buf.read_exact(&mut segment)?;
                     Self::GetSlruSegment(PagestreamGetSlruSegmentResponse {
-                        req: PagestreamGetSlruSegmentRequest {
-                            hdr: PagestreamRequest {
-                                reqid,
-                                request_lsn,
-                                not_modified_since,
-                            },
-                            kind,
-                            segno,
-                        },
                         segment: segment.into(),
                     })
                 }
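Both sides of this hunk decode the error payload the same way: read up to the NUL terminator and convert through CString. A small standalone sketch of just that step, assuming anyhow for error plumbing and a Cursor in place of the pagestream reader:

// Minimal sketch of the null-terminated error-message decoding used above,
// operating on a plain byte slice rather than the real copyin reader.
use std::ffi::CString;
use std::io::{BufRead, Cursor};

fn read_error_message(buf: &mut Cursor<&[u8]>) -> anyhow::Result<String> {
    let mut msg = Vec::new();
    buf.read_until(0, &mut msg)?; // includes the trailing NUL, if present
    let cstring = CString::from_vec_with_nul(msg)?;
    Ok(cstring.to_str()?.to_owned())
}

fn main() -> anyhow::Result<()> {
    let wire = b"shard is not ready\0"; // hypothetical error text, for illustration
    let mut cur = Cursor::new(&wire[..]);
    assert_eq!(read_error_message(&mut cur)?, "shard is not ready");
    Ok(())
}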
@@ -1991,11 +1498,8 @@ mod tests {
         // Test serialization/deserialization of PagestreamFeMessage
         let messages = vec![
             PagestreamFeMessage::Exists(PagestreamExistsRequest {
-                hdr: PagestreamRequest {
-                    reqid: 0,
-                    request_lsn: Lsn(4),
-                    not_modified_since: Lsn(3),
-                },
+                request_lsn: Lsn(4),
+                not_modified_since: Lsn(3),
                 rel: RelTag {
                     forknum: 1,
                     spcnode: 2,
@@ -2004,11 +1508,8 @@ mod tests {
                 },
             }),
             PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
-                hdr: PagestreamRequest {
-                    reqid: 0,
-                    request_lsn: Lsn(4),
-                    not_modified_since: Lsn(4),
-                },
+                request_lsn: Lsn(4),
+                not_modified_since: Lsn(4),
                 rel: RelTag {
                     forknum: 1,
                     spcnode: 2,
@@ -2017,11 +1518,8 @@ mod tests {
                 },
             }),
             PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
-                hdr: PagestreamRequest {
-                    reqid: 0,
-                    request_lsn: Lsn(4),
-                    not_modified_since: Lsn(3),
-                },
+                request_lsn: Lsn(4),
+                not_modified_since: Lsn(3),
                 rel: RelTag {
                     forknum: 1,
                     spcnode: 2,
@@ -2031,19 +1529,14 @@ mod tests {
                 blkno: 7,
             }),
             PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
-                hdr: PagestreamRequest {
-                    reqid: 0,
-                    request_lsn: Lsn(4),
-                    not_modified_since: Lsn(3),
-                },
+                request_lsn: Lsn(4),
+                not_modified_since: Lsn(3),
                 dbnode: 7,
             }),
         ];
         for msg in messages {
             let bytes = msg.serialize();
-            let reconstructed =
-                PagestreamFeMessage::parse(&mut bytes.reader(), PagestreamProtocolVersion::V3)
-                    .unwrap();
+            let reconstructed = PagestreamFeMessage::parse(&mut bytes.reader()).unwrap();
             assert!(msg == reconstructed);
         }
     }
@@ -2206,45 +1699,4 @@ mod tests {
         );
     }
     }
-
-    #[test]
-    fn test_tenant_config_patch_request_serde() {
-        let patch_request = TenantConfigPatchRequest {
-            tenant_id: TenantId::from_str("17c6d121946a61e5ab0fe5a2fd4d8215").unwrap(),
-            config: TenantConfigPatch {
-                checkpoint_distance: FieldPatch::Upsert(42),
-                gc_horizon: FieldPatch::Remove,
-                compaction_threshold: FieldPatch::Noop,
-                ..TenantConfigPatch::default()
-            },
-        };
-
-        let json = serde_json::to_string(&patch_request).unwrap();
-
-        let expected = r#"{"tenant_id":"17c6d121946a61e5ab0fe5a2fd4d8215","checkpoint_distance":42,"gc_horizon":null}"#;
-        assert_eq!(json, expected);
-
-        let decoded: TenantConfigPatchRequest = serde_json::from_str(&json).unwrap();
-        assert_eq!(decoded.tenant_id, patch_request.tenant_id);
-        assert_eq!(decoded.config, patch_request.config);
-
-        // Now apply the patch to a config to demonstrate semantics
-
-        let base = TenantConfig {
-            checkpoint_distance: Some(28),
-            gc_horizon: Some(100),
-            compaction_target_size: Some(1024),
-            ..Default::default()
-        };
-
-        let expected = TenantConfig {
-            checkpoint_distance: Some(42),
-            gc_horizon: None,
-            ..base.clone()
-        };
-
-        let patched = base.apply_patch(decoded.config);
-
-        assert_eq!(patched, expected);
-    }
 }
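The removed test exercises upsert/remove/noop patch semantics over a tenant config. For readers skimming the diff, here is a self-contained sketch of those semantics using a hypothetical Patch<T> enum standing in for FieldPatch; the real types in the pageserver API crate differ in detail.

// Sketch only: a hypothetical Patch<T> mirroring the Upsert/Remove/Noop
// behaviour the removed test demonstrates against Option-valued settings.
#[derive(Clone, Debug, PartialEq)]
enum Patch<T> {
    Upsert(T),
    Remove,
    Noop,
}

impl<T> Patch<T> {
    fn apply(self, current: &mut Option<T>) {
        match self {
            Patch::Upsert(v) => *current = Some(v), // set or overwrite the value
            Patch::Remove => *current = None,       // clear the value
            Patch::Noop => {}                       // leave the value untouched
        }
    }
}

fn main() {
    let mut checkpoint_distance = Some(28u64);
    let mut gc_horizon = Some(100u64);
    let mut compaction_threshold: Option<u64> = Some(10);

    Patch::Upsert(42).apply(&mut checkpoint_distance);
    Patch::Remove.apply(&mut gc_horizon);
    Patch::Noop.apply(&mut compaction_threshold);

    assert_eq!(checkpoint_distance, Some(42));
    assert_eq!(gc_horizon, None);
    assert_eq!(compaction_threshold, Some(10));
}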
@@ -173,11 +173,7 @@ impl ShardIdentity {

     /// Return true if the key should be stored on all shards, not just one.
     pub fn is_key_global(&self, key: &Key) -> bool {
-        if key.is_slru_block_key()
-            || key.is_slru_segment_size_key()
-            || key.is_aux_file_key()
-            || key.is_slru_dir_key()
-        {
+        if key.is_slru_block_key() || key.is_slru_segment_size_key() || key.is_aux_file_key() {
             // Special keys that are only stored on shard 0
             false
         } else if key.is_rel_block_key() {
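The hunk above is truncated after the is_rel_block_key() branch. As a rough, standalone sketch only: booleans stand in for the Key inspection methods, and the two branches below the visible condition are assumptions about the surrounding code, not taken from this diff.

// Sketch of the routing predicate; the final two branches are assumptions
// (the diff above is cut off there), marked as such in the comments.
fn is_key_global(
    is_slru_block: bool,
    is_slru_segment_size: bool,
    is_aux_file: bool,
    is_slru_dir: bool,
    is_rel_block: bool,
) -> bool {
    if is_slru_block || is_slru_segment_size || is_aux_file || is_slru_dir {
        // Special keys that are only stored on shard 0
        false
    } else if is_rel_block {
        // Assumed: relation blocks map to a single shard (not shown in the diff).
        false
    } else {
        // Assumed: remaining metadata keys are stored on all shards.
        true
    }
}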
Some files were not shown because too many files have changed in this diff.