Mirror of https://github.com/neondatabase/neon.git, synced 2026-02-03 02:30:37 +00:00

Compare commits: lfc_bug_fi ... conrad/pro

4 commits:

| Author | SHA1 | Date |
|---|---|---|
|  | 01475c9e75 |  |
|  | c835bbba1f |  |
|  | f94dde4432 |  |
|  | 4991a85704 |  |
(file path not captured)

@@ -3,16 +3,6 @@
 # by the RUSTDOCFLAGS env var in CI.
 rustdocflags = ["-Arustdoc::private_intra_doc_links"]

-# Enable frame pointers. This may have a minor performance overhead, but makes it easier and more
-# efficient to obtain stack traces (and thus CPU/heap profiles). It may also avoid seg faults that
-# we've seen with libunwind-based profiling. See also:
-#
-# * <https://www.brendangregg.com/blog/2024-03-17/the-return-of-the-frame-pointers.html>
-# * <https://github.com/rust-lang/rust/pull/122646>
-#
-# NB: the RUSTFLAGS envvar will replace this. Make sure to update e.g. Dockerfile as well.
-rustflags = ["-Cforce-frame-pointers=yes"]
-
 [alias]
 build_testing = ["build", "--features", "testing"]
 neon = ["run", "--bin", "neon_local"]
1 .github/ISSUE_TEMPLATE/bug-template.md vendored

@@ -3,7 +3,6 @@ name: Bug Template
 about: Used for describing bugs
 title: ''
 labels: t/bug
-type: Bug
 assignees: ''

 ---
1 .github/ISSUE_TEMPLATE/epic-template.md vendored

@@ -4,7 +4,6 @@ about: A set of related tasks contributing towards specific outcome, comprising
 more than 1 week of work.
 title: 'Epic: '
 labels: t/Epic
-type: Epic
 assignees: ''

 ---
7 .github/actionlint.yml vendored

@@ -4,7 +4,6 @@ self-hosted-runner:
 - large
 - large-arm64
 - small
-- small-metal
 - small-arm64
 - us-east-2
 config-variables:

@@ -22,9 +21,3 @@ config-variables:
 - SLACK_UPCOMING_RELEASE_CHANNEL_ID
 - DEV_AWS_OIDC_ROLE_ARN
 - BENCHMARK_INGEST_TARGET_PROJECTID
-- PGREGRESS_PG16_PROJECT_ID
-- PGREGRESS_PG17_PROJECT_ID
-- SLACK_ON_CALL_QA_STAGING_STREAM
-- DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN
-- SLACK_ON_CALL_STORAGE_STAGING_STREAM
-- SLACK_CICD_CHANNEL_ID
(file path not captured)

@@ -7,9 +7,10 @@ inputs:
 type: boolean
 required: false
 default: false
-aws-oicd-role-arn:
-description: 'OIDC role arn to interract with S3'
-required: true
+aws_oicd_role_arn:
+description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
+required: false
+default: ''

 outputs:
 base-url:

@@ -83,11 +84,12 @@ runs:
 ALLURE_VERSION: 2.27.0
 ALLURE_ZIP_SHA256: b071858fb2fa542c65d8f152c5c40d26267b2dfb74df1f1608a589ecca38e777

-- uses: aws-actions/configure-aws-credentials@v4
-if: ${{ !cancelled() }}
+- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
+if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
+uses: aws-actions/configure-aws-credentials@v4
 with:
 aws-region: eu-central-1
-role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+role-to-assume: ${{ inputs.aws_oicd_role_arn }}
 role-duration-seconds: 3600 # 1 hour should be more than enough to upload report

 # Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this
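The two hunks above belong to one of the Allure composite actions (the file header was not captured in this view). They rename the `aws-oicd-role-arn` input to `aws_oicd_role_arn`, make it optional with an empty default, and only re-run `aws-actions/configure-aws-credentials` when a role is actually supplied. A minimal caller sketch of that contract; the workflow and job layout here is assumed, only `./.github/actions/allure-report-generate` and `vars.DEV_AWS_OIDC_ROLE_ARN` are taken from call sites shown later in this diff:

```yaml
# Hedged sketch, not from the repository: workflow and job names are made up.
name: example-caller
on: workflow_dispatch
jobs:
  report:
    runs-on: ubuntu-latest
    permissions:
      id-token: write   # needed if the action is expected to (re-)acquire the OIDC role itself
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/allure-report-generate
        with:
          # optional now: when left empty (the new default), the action skips its
          # "(Re-)configure AWS credentials" step and reuses the job's existing credentials
          aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
```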
14 .github/actions/allure-report-store/action.yml vendored

@@ -8,9 +8,10 @@ inputs:
 unique-key:
 description: 'string to distinguish different results in the same run'
 required: true
-aws-oicd-role-arn:
-description: 'OIDC role arn to interract with S3'
-required: true
+aws_oicd_role_arn:
+description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
+required: false
+default: ''

 runs:
 using: "composite"

@@ -35,11 +36,12 @@ runs:
 env:
 REPORT_DIR: ${{ inputs.report-dir }}

-- uses: aws-actions/configure-aws-credentials@v4
-if: ${{ !cancelled() }}
+- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
+if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
+uses: aws-actions/configure-aws-credentials@v4
 with:
 aws-region: eu-central-1
-role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+role-to-assume: ${{ inputs.aws_oicd_role_arn }}
 role-duration-seconds: 3600 # 1 hour should be more than enough to upload report

 - name: Upload test results
9 .github/actions/download/action.yml vendored

@@ -15,19 +15,10 @@ inputs:
 prefix:
 description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
 required: false
-aws-oicd-role-arn:
-description: 'OIDC role arn to interract with S3'
-required: true

 runs:
 using: "composite"
 steps:
-- uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ inputs.aws-oicd-role-arn }}
-role-duration-seconds: 3600
-
 - name: Download artifact
 id: download-artifact
 shell: bash -euxo pipefail {0}
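With its `aws-oicd-role-arn` input and embedded `configure-aws-credentials` step removed, the download action no longer authenticates against S3 on its own; the calling job is expected to already hold AWS credentials. A hedged sketch of one way a caller could do that (the step ordering is an assumption; in this same diff, `_build-and-test-locally.yml` instead exports `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` at the workflow level):

```yaml
# Hedged sketch: configure credentials once per job, then call the action.
steps:
  - uses: actions/checkout@v4
  - uses: aws-actions/configure-aws-credentials@v4   # needs `id-token: write` on the job for OIDC
    with:
      aws-region: eu-central-1
      role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
      role-duration-seconds: 3600
  - name: Download Neon artifact
    uses: ./.github/actions/download
    with:
      name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
      path: /tmp/neon/
      prefix: latest
```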
54 .github/actions/neon-project-create/action.yml vendored

@@ -17,34 +17,6 @@ inputs:
 compute_units:
 description: '[Min, Max] compute units'
 default: '[1, 1]'
-# settings below only needed if you want the project to be sharded from the beginning
-shard_split_project:
-description: 'by default new projects are not shard-split, specify true to shard-split'
-required: false
-default: 'false'
-admin_api_key:
-description: 'Admin API Key needed for shard-splitting. Must be specified if shard_split_project is true'
-required: false
-shard_count:
-description: 'Number of shards to split the project into, only applies if shard_split_project is true'
-required: false
-default: '8'
-stripe_size:
-description: 'Stripe size, optional, in 8kiB pages. e.g. set 2048 for 16MB stripes. Default is 128 MiB, only applies if shard_split_project is true'
-required: false
-default: '32768'
-psql_path:
-description: 'Path to psql binary - it is caller responsibility to provision the psql binary'
-required: false
-default: '/tmp/neon/pg_install/v16/bin/psql'
-libpq_lib_path:
-description: 'Path to directory containing libpq library - it is caller responsibility to provision the libpq library'
-required: false
-default: '/tmp/neon/pg_install/v16/lib'
-project_settings:
-description: 'A JSON object with project settings'
-required: false
-default: '{}'

 outputs:
 dsn:

@@ -76,7 +48,7 @@ runs:
 \"provisioner\": \"k8s-neonvm\",
 \"autoscaling_limit_min_cu\": ${MIN_CU},
 \"autoscaling_limit_max_cu\": ${MAX_CU},
-\"settings\": ${PROJECT_SETTINGS}
+\"settings\": { }
 }
 }")

@@ -91,23 +63,6 @@ runs:
 echo "project_id=${project_id}" >> $GITHUB_OUTPUT

 echo "Project ${project_id} has been created"
-
-if [ "${SHARD_SPLIT_PROJECT}" = "true" ]; then
-# determine tenant ID
-TENANT_ID=`${PSQL} ${dsn} -t -A -c "SHOW neon.tenant_id"`
-
-echo "Splitting project ${project_id} with tenant_id ${TENANT_ID} into $((SHARD_COUNT)) shards with stripe size $((STRIPE_SIZE))"
-
-echo "Sending PUT request to https://${API_HOST}/regions/${REGION_ID}/api/v1/admin/storage/proxy/control/v1/tenant/${TENANT_ID}/shard_split"
-echo "with body {\"new_shard_count\": $((SHARD_COUNT)), \"new_stripe_size\": $((STRIPE_SIZE))}"
-
-# we need an ADMIN API KEY to invoke storage controller API for shard splitting (bash -u above checks that the variable is set)
-curl -X PUT \
-"https://${API_HOST}/regions/${REGION_ID}/api/v1/admin/storage/proxy/control/v1/tenant/${TENANT_ID}/shard_split" \
--H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer ${ADMIN_API_KEY}" \
--d "{\"new_shard_count\": $SHARD_COUNT, \"new_stripe_size\": $STRIPE_SIZE}"
-fi
-
 env:
 API_HOST: ${{ inputs.api_host }}
 API_KEY: ${{ inputs.api_key }}

@@ -115,10 +70,3 @@ runs:
 POSTGRES_VERSION: ${{ inputs.postgres_version }}
 MIN_CU: ${{ fromJSON(inputs.compute_units)[0] }}
 MAX_CU: ${{ fromJSON(inputs.compute_units)[1] }}
-SHARD_SPLIT_PROJECT: ${{ inputs.shard_split_project }}
-ADMIN_API_KEY: ${{ inputs.admin_api_key }}
-SHARD_COUNT: ${{ inputs.shard_count }}
-STRIPE_SIZE: ${{ inputs.stripe_size }}
-PSQL: ${{ inputs.psql_path }}
-LD_LIBRARY_PATH: ${{ inputs.libpq_lib_path }}
-PROJECT_SETTINGS: ${{ inputs.project_settings }}
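After this change the action only creates the project: the shard-splitting inputs, the `curl` call to the storage controller, and the related environment variables are gone, and `settings` is always sent as an empty object. A sketch of the simplified call, mirroring the `benchmarking.yml` call sites later in this diff (step name and values are copied from there, not invented):

```yaml
- name: Create Neon Project
  id: create-neon-project
  uses: ./.github/actions/neon-project-create
  with:
    region_id: ${{ matrix.region_id }}
    postgres_version: ${{ env.DEFAULT_PG_VERSION }}
    api_key: ${{ secrets.NEON_STAGING_API_KEY }}
    compute_units: '[1, 1]'   # '[Min, Max]' compute units, per the remaining input
# the created project's connection string is then available as
# ${{ steps.create-neon-project.outputs.dsn }}
```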
20 .github/actions/run-python-test-set/action.yml vendored

@@ -48,9 +48,10 @@ inputs:
 description: 'benchmark durations JSON'
 required: false
 default: '{}'
-aws-oicd-role-arn:
-description: 'OIDC role arn to interract with S3'
-required: true
+aws_oicd_role_arn:
+description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
+required: false
+default: ''

 runs:
 using: "composite"

@@ -61,7 +62,6 @@ runs:
 with:
 name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
 path: /tmp/neon
-aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

 - name: Download Neon binaries for the previous release
 if: inputs.build_type != 'remote'

@@ -70,7 +70,6 @@ runs:
 name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
 path: /tmp/neon-previous
 prefix: latest
-aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

 - name: Download compatibility snapshot
 if: inputs.build_type != 'remote'

@@ -82,7 +81,6 @@ runs:
 # The lack of compatibility snapshot (for example, for the new Postgres version)
 # shouldn't fail the whole job. Only relevant test should fail.
 skip-if-does-not-exist: true
-aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

 - name: Checkout
 if: inputs.needs_postgres_source == 'true'

@@ -220,19 +218,17 @@ runs:
 # The lack of compatibility snapshot shouldn't fail the job
 # (for example if we didn't run the test for non build-and-test workflow)
 skip-if-does-not-exist: true
-aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

-- uses: aws-actions/configure-aws-credentials@v4
-if: ${{ !cancelled() }}
+- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
+if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
+uses: aws-actions/configure-aws-credentials@v4
 with:
 aws-region: eu-central-1
-role-to-assume: ${{ inputs.aws-oicd-role-arn }}
+role-to-assume: ${{ inputs.aws_oicd_role_arn }}
 role-duration-seconds: 3600 # 1 hour should be more than enough to upload report

 - name: Upload test results
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-store
 with:
 report-dir: /tmp/test_output/allure/results
 unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}
-aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
(file path not captured)

@@ -14,11 +14,9 @@ runs:
 name: coverage-data-artifact
 path: /tmp/coverage
 skip-if-does-not-exist: true # skip if there's no previous coverage to download
-aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

 - name: Upload coverage data
 uses: ./.github/actions/upload
 with:
 name: coverage-data-artifact
 path: /tmp/coverage
-aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
11 .github/actions/upload/action.yml vendored

@@ -14,10 +14,6 @@ inputs:
 prefix:
 description: "S3 prefix. Default is '${GITHUB_SHA}/${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
 required: false
-aws-oicd-role-arn:
-description: "the OIDC role arn for aws auth"
-required: false
-default: ""

 runs:
 using: "composite"

@@ -57,13 +53,6 @@ runs:

 echo 'SKIPPED=false' >> $GITHUB_OUTPUT

-- name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ inputs.aws-oicd-role-arn }}
-role-duration-seconds: 3600
-
 - name: Upload artifact
 if: ${{ steps.prepare-artifact.outputs.SKIPPED == 'false' }}
 shell: bash -euxo pipefail {0}
13 .github/file-filters.yaml vendored

@@ -1,13 +0,0 @@
-rust_code: ['**/*.rs', '**/Cargo.toml', '**/Cargo.lock']
-rust_dependencies: ['**/Cargo.lock']
-
-v14: ['vendor/postgres-v14/**', 'Makefile', 'pgxn/**']
-v15: ['vendor/postgres-v15/**', 'Makefile', 'pgxn/**']
-v16: ['vendor/postgres-v16/**', 'Makefile', 'pgxn/**']
-v17: ['vendor/postgres-v17/**', 'Makefile', 'pgxn/**']
-
-rebuild_neon_extra:
-- .github/workflows/neon_extra_builds.yml
-
-rebuild_macos:
-- .github/workflows/build-macos.yml
(file path not captured)

@@ -17,7 +17,7 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
-platform: [ aws-rds-postgres, aws-aurora-serverless-v2-postgres, neon, neon_pg17 ]
+platform: [ aws-rds-postgres, aws-aurora-serverless-v2-postgres, neon ]
 database: [ clickbench, tpch, userexample ]

 env:

@@ -41,9 +41,6 @@ jobs:
 neon)
 CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
 ;;
-neon_pg17)
-CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR_PG17 }}
-;;
 aws-rds-postgres)
 CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
 ;;

@@ -73,7 +70,6 @@ jobs:
 name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
 path: /tmp/neon/
 prefix: latest
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 # we create a table that has one row for each database that we want to restore with the status whether the restore is done
 - name: Create benchmark_restore_status table if it does not exist
54 .github/workflows/_build-and-test-locally.yml vendored

@@ -31,13 +31,12 @@ defaults:
 env:
 RUST_BACKTRACE: 1
 COPT: '-Werror'
+AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}

 jobs:
 build-neon:
 runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-contents: read
 container:
 image: ${{ inputs.build-tools-image }}
 credentials:

@@ -158,6 +157,8 @@ jobs:

 - name: Run cargo build
 run: |
+PQ_LIB_DIR=$(pwd)/pg_install/v16/lib
+export PQ_LIB_DIR
 ${cov_prefix} mold -run cargo build $CARGO_FLAGS $CARGO_FEATURES --bins --tests

 # Do install *before* running rust tests because they might recompile the

@@ -204,17 +205,12 @@ jobs:
 done
 fi

-- name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-role-duration-seconds: 18000 # 5 hours
-
 - name: Run rust tests
 env:
 NEXTEST_RETRIES: 3
 run: |
+PQ_LIB_DIR=$(pwd)/pg_install/v16/lib
+export PQ_LIB_DIR
 LD_LIBRARY_PATH=$(pwd)/pg_install/v17/lib
 export LD_LIBRARY_PATH

@@ -225,13 +221,8 @@ jobs:
 ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E '!package(pageserver)'

 # run pageserver tests with different settings
-for get_vectored_concurrent_io in sequential sidecar-task; do
-for io_engine in std-fs tokio-epoll-uring ; do
-NEON_PAGESERVER_UNIT_TEST_GET_VECTORED_CONCURRENT_IO=$get_vectored_concurrent_io \
-NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine \
-${cov_prefix} \
-cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(pageserver)'
-done
+for io_engine in std-fs tokio-epoll-uring ; do
+NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(pageserver)'
 done

 # Run separate tests for real S3
@@ -265,27 +256,6 @@ jobs:
 with:
 name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-artifact
 path: /tmp/neon
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-
-- name: Check diesel schema
-if: inputs.build-type == 'release' && inputs.arch == 'x64'
-env:
-DATABASE_URL: postgresql://localhost:1235/storage_controller
-POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
-run: |
-/tmp/neon/bin/neon_local init
-/tmp/neon/bin/neon_local storage_controller start
-
-diesel print-schema > storage_controller/src/schema.rs
-
-if [ -n "$(git diff storage_controller/src/schema.rs)" ]; then
-echo >&2 "Uncommitted changes in diesel schema"
-
-git diff .
-exit 1
-fi
-
-/tmp/neon/bin/neon_local storage_controller stop

 # XXX: keep this after the binaries.list is formed, so the coverage can properly work later
 - name: Merge and upload coverage data

@@ -295,10 +265,6 @@ jobs:
 regress-tests:
 # Don't run regression tests on debug arm64 builds
 if: inputs.build-type != 'debug' || inputs.arch != 'arm64'
-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-contents: read
-statuses: write
 needs: [ build-neon ]
 runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
 container:

@@ -317,7 +283,7 @@ jobs:
 submodules: true

 - name: Pytest regression tests
-continue-on-error: ${{ matrix.lfc_state == 'with-lfc' && inputs.build-type == 'debug' }}
+continue-on-error: ${{ matrix.lfc_state == 'with-lfc' }}
 uses: ./.github/actions/run-python-test-set
 timeout-minutes: 60
 with:

@@ -329,13 +295,11 @@ jobs:
 real_s3_region: eu-central-1
 rerun_failed: true
 pg_version: ${{ matrix.pg_version }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
 CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
 BUILD_TAG: ${{ inputs.build-tag }}
 PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
-PAGESERVER_GET_VECTORED_CONCURRENT_IO: sidecar-task
 USE_LFC: ${{ matrix.lfc_state == 'with-lfc' && 'true' || 'false' }}

 # Temporary disable this step until we figure out why it's so flaky
89 .github/workflows/_check-codestyle-rust.yml vendored

@@ -1,89 +0,0 @@
-name: Check Codestyle Rust
-
-on:
-workflow_call:
-inputs:
-build-tools-image:
-description: "build-tools image"
-required: true
-type: string
-archs:
-description: "Json array of architectures to run on"
-type: string
-
-
-defaults:
-run:
-shell: bash -euxo pipefail {0}
-
-# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
-permissions: {}
-
-jobs:
-check-codestyle-rust:
-strategy:
-matrix:
-arch: ${{ fromJson(inputs.archs) }}
-runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', matrix.arch == 'arm64' && 'small-arm64' || 'small')) }}
-
-container:
-image: ${{ inputs.build-tools-image }}
-credentials:
-username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
-password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
-options: --init
-
-steps:
-- name: Checkout
-uses: actions/checkout@v4
-with:
-submodules: true
-
-- name: Cache cargo deps
-uses: actions/cache@v4
-with:
-path: |
-~/.cargo/registry
-!~/.cargo/registry/src
-~/.cargo/git
-target
-key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
-
-# Some of our rust modules use FFI and need those to be checked
-- name: Get postgres headers
-run: make postgres-headers -j$(nproc)
-
-# cargo hack runs the given cargo subcommand (clippy in this case) for all feature combinations.
-# This will catch compiler & clippy warnings in all feature combinations.
-# TODO: use cargo hack for build and test as well, but, that's quite expensive.
-# NB: keep clippy args in sync with ./run_clippy.sh
-#
-# The only difference between "clippy --debug" and "clippy --release" is that in --release mode,
-# #[cfg(debug_assertions)] blocks are not built. It's not worth building everything for second
-# time just for that, so skip "clippy --release".
-- run: |
-CLIPPY_COMMON_ARGS="$( source .neon_clippy_args; echo "$CLIPPY_COMMON_ARGS")"
-if [ "$CLIPPY_COMMON_ARGS" = "" ]; then
-echo "No clippy args found in .neon_clippy_args"
-exit 1
-fi
-echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
-- name: Run cargo clippy (debug)
-run: cargo hack --features default --ignore-unknown-features --feature-powerset clippy $CLIPPY_COMMON_ARGS
-
-- name: Check documentation generation
-run: cargo doc --workspace --no-deps --document-private-items
-env:
-RUSTDOCFLAGS: "-Dwarnings -Arustdoc::private_intra_doc_links"
-
-# Use `${{ !cancelled() }}` to run quck tests after the longer clippy run
-- name: Check formatting
-if: ${{ !cancelled() }}
-run: cargo fmt --all -- --check
-
-# https://github.com/facebookincubator/cargo-guppy/tree/bec4e0eb29dcd1faac70b1b5360267fc02bf830e/tools/cargo-hakari#2-keep-the-workspace-hack-up-to-date-in-ci
-- name: Check rust dependencies
-if: ${{ !cancelled() }}
-run: |
-cargo hakari generate --diff # workspace-hack Cargo.toml is up-to-date
-cargo hakari manage-deps --dry-run # all workspace crates depend on workspace-hack
2 .github/workflows/actionlint.yml vendored

@@ -33,7 +33,7 @@ jobs:
 # SC2086 - Double quote to prevent globbing and word splitting. - https://www.shellcheck.net/wiki/SC2086
 SHELLCHECK_OPTS: --exclude=SC2046,SC2086
 with:
-fail_level: error
+fail_on_error: true
 filter_mode: nofilter
 level: error
4 .github/workflows/approved-for-ci-run.yml vendored

@@ -94,9 +94,7 @@ jobs:
 echo "LABELS_TO_ADD=${LABELS_TO_ADD}" >> ${GITHUB_OUTPUT}
 echo "LABELS_TO_REMOVE=${LABELS_TO_REMOVE}" >> ${GITHUB_OUTPUT}

-- uses: actions/checkout@v4
-with:
-ref: ${{ github.event.pull_request.head.sha }}
+- run: gh pr checkout "${PR_NUMBER}"

 - run: git checkout -b "${BRANCH}"
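The PR checkout now goes through the GitHub CLI instead of `actions/checkout` with an explicit ref. `gh` needs a token and the PR number in its environment; the diff does not show that wiring, so the sketch below fills it in with assumed names:

```yaml
# Hedged sketch: the GH_TOKEN and PR_NUMBER wiring is assumed, not part of the diff.
- run: gh pr checkout "${PR_NUMBER}"
  env:
    GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    PR_NUMBER: ${{ github.event.pull_request.number }}
```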
234 .github/workflows/benchmarking.yml vendored

@@ -63,15 +63,11 @@ jobs:
 fail-fast: false
 matrix:
 include:
-- PG_VERSION: 16
+- DEFAULT_PG_VERSION: 16
 PLATFORM: "neon-staging"
 region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
 RUNNER: [ self-hosted, us-east-2, x64 ]
-- PG_VERSION: 17
-PLATFORM: "neon-staging"
-region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
-RUNNER: [ self-hosted, us-east-2, x64 ]
-- PG_VERSION: 16
+- DEFAULT_PG_VERSION: 16
 PLATFORM: "azure-staging"
 region_id: 'azure-eastus2'
 RUNNER: [ self-hosted, eastus2, x64 ]

@@ -79,7 +75,7 @@ jobs:
 TEST_PG_BENCH_DURATIONS_MATRIX: "300"
 TEST_PG_BENCH_SCALES_MATRIX: "10,100"
 POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
-PG_VERSION: ${{ matrix.PG_VERSION }}
+DEFAULT_PG_VERSION: ${{ matrix.DEFAULT_PG_VERSION }}
 TEST_OUTPUT: /tmp/test_output
 BUILD_TYPE: remote
 SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
@@ -109,14 +105,13 @@ jobs:
 name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
 path: /tmp/neon/
 prefix: latest
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Create Neon Project
 id: create-neon-project
 uses: ./.github/actions/neon-project-create
 with:
 region_id: ${{ matrix.region_id }}
-postgres_version: ${{ env.PG_VERSION }}
+postgres_version: ${{ env.DEFAULT_PG_VERSION }}
 api_key: ${{ secrets.NEON_STAGING_API_KEY }}

 - name: Run benchmark

@@ -126,8 +121,8 @@ jobs:
 test_selection: performance
 run_in_parallel: false
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
-pg_version: ${{ env.PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+pg_version: ${{ env.DEFAULT_PG_VERSION }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 # Set --sparse-ordering option of pytest-order plugin
 # to ensure tests are running in order of appears in the file.
 # It's important for test_perf_pgbench.py::test_pgbench_remote_* tests

@@ -157,7 +152,7 @@ jobs:
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
 with:
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
@@ -209,7 +204,6 @@ jobs:
 name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
 path: /tmp/neon/
 prefix: latest
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Run Logical Replication benchmarks
 uses: ./.github/actions/run-python-test-set

@@ -220,7 +214,7 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 5400
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
 PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -237,7 +231,7 @@ jobs:
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 5400
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
 PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -249,7 +243,7 @@ jobs:
 uses: ./.github/actions/allure-report-generate
 with:
 store-test-results-into-db: true
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
@@ -312,16 +306,11 @@ jobs:
 "image": [ "'"$image_default"'" ],
 "include": [{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_default"', "image": "'"$image_default"'" },
 { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
-{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new-many-tables","db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
 { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
 { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
 { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-new", "db_size": "10gb","runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
 { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-new", "db_size": "50gb","runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
-{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-sharding-reuse", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
-{ "pg_version": 17, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_default"', "image": "'"$image_default"'" },
-{ "pg_version": 17, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
-{ "pg_version": 17, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new-many-tables","db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
-{ "pg_version": 17, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" }]
+{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-sharding-reuse", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" }]
 }'

 if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then

@@ -337,15 +326,12 @@ jobs:
 matrix='{
 "platform": [
 "neonvm-captest-reuse"
-],
-"pg_version" : [
-16,17
 ]
 }'

 if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
-matrix=$(echo "$matrix" | jq '.include += [{ "pg_version": 16, "platform": "rds-postgres" },
-{ "pg_version": 16, "platform": "rds-aurora" }]')
+matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres" },
+{ "platform": "rds-aurora" }]')
 fi

 echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT

@@ -357,14 +343,14 @@ jobs:
 "platform": [
 "neonvm-captest-reuse"
 ],
-"pg_version" : [
-16,17
+"scale": [
+"10"
 ]
 }'

 if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
-matrix=$(echo "$matrix" | jq '.include += [{ "pg_version": 16, "platform": "rds-postgres" },
-{ "pg_version": 16, "platform": "rds-aurora" }]')
+matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "scale": "10" },
+{ "platform": "rds-aurora", "scale": "10" }]')
 fi

 echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
@@ -389,7 +375,7 @@ jobs:
 TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
 TEST_PG_BENCH_SCALES_MATRIX: ${{ matrix.db_size }}
 POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
-PG_VERSION: ${{ matrix.pg_version }}
+DEFAULT_PG_VERSION: ${{ matrix.pg_version }}
 TEST_OUTPUT: /tmp/test_output
 BUILD_TYPE: remote
 SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}

@@ -419,15 +405,14 @@ jobs:
 name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
 path: /tmp/neon/
 prefix: latest
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Create Neon Project
-if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-new-many-tables", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
+if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
 id: create-neon-project
 uses: ./.github/actions/neon-project-create
 with:
 region_id: ${{ matrix.region_id }}
-postgres_version: ${{ env.PG_VERSION }}
+postgres_version: ${{ env.DEFAULT_PG_VERSION }}
 api_key: ${{ secrets.NEON_STAGING_API_KEY }}
 compute_units: ${{ (contains(matrix.platform, 'captest-freetier') && '[0.25, 0.25]') || '[1, 1]' }}

@@ -441,7 +426,7 @@ jobs:
 neonvm-captest-sharding-reuse)
 CONNSTR=${{ secrets.BENCHMARK_CAPTEST_SHARDING_CONNSTR }}
 ;;
-neonvm-captest-new | neonvm-captest-new-many-tables | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
+neonvm-captest-new | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
 CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
 ;;
 rds-aurora)

@@ -458,26 +443,6 @@ jobs:

 echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

-# we want to compare Neon project OLTP throughput and latency at scale factor 10 GB
-# without (neonvm-captest-new)
-# and with (neonvm-captest-new-many-tables) many relations in the database
-- name: Create many relations before the run
-if: contains(fromJson('["neonvm-captest-new-many-tables"]'), matrix.platform)
-uses: ./.github/actions/run-python-test-set
-with:
-build_type: ${{ env.BUILD_TYPE }}
-test_selection: performance
-run_in_parallel: false
-save_perf_report: ${{ env.SAVE_PERF_REPORT }}
-extra_params: -m remote_cluster --timeout 21600 -k test_perf_many_relations
-pg_version: ${{ env.PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-env:
-BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
-VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
-PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
-TEST_NUM_RELATIONS: 10000
-
 - name: Benchmark init
 uses: ./.github/actions/run-python-test-set
 with:
@@ -486,8 +451,8 @@ jobs:
 run_in_parallel: false
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
-pg_version: ${{ env.PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+pg_version: ${{ env.DEFAULT_PG_VERSION }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"

@@ -501,8 +466,8 @@ jobs:
 run_in_parallel: false
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
-pg_version: ${{ env.PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+pg_version: ${{ env.DEFAULT_PG_VERSION }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"

@@ -516,8 +481,8 @@ jobs:
 run_in_parallel: false
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
-pg_version: ${{ env.PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+pg_version: ${{ env.DEFAULT_PG_VERSION }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"

@@ -535,7 +500,7 @@ jobs:
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
 with:
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
@@ -560,19 +525,14 @@ jobs:
 include:
 - PLATFORM: "neonvm-captest-pgvector"
 RUNNER: [ self-hosted, us-east-2, x64 ]
-postgres_version: 16
-- PLATFORM: "neonvm-captest-pgvector-pg17"
-RUNNER: [ self-hosted, us-east-2, x64 ]
-postgres_version: 17
 - PLATFORM: "azure-captest-pgvector"
 RUNNER: [ self-hosted, eastus2, x64 ]
-postgres_version: 16

 env:
 TEST_PG_BENCH_DURATIONS_MATRIX: "15m"
 TEST_PG_BENCH_SCALES_MATRIX: "1"
 POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
-PG_VERSION: ${{ matrix.postgres_version }}
+DEFAULT_PG_VERSION: 16
 TEST_OUTPUT: /tmp/test_output
 BUILD_TYPE: remote

@@ -590,20 +550,32 @@ jobs:
 steps:
 - uses: actions/checkout@v4

-- name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-role-duration-seconds: 18000 # 5 hours
-
-- name: Download Neon artifact
-uses: ./.github/actions/download
-with:
-name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
-path: /tmp/neon/
-prefix: latest
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+# until https://github.com/neondatabase/neon/issues/8275 is fixed we temporarily install postgresql-16
+# instead of using Neon artifacts containing pgbench
+- name: Install postgresql-16 where pytest expects it
+run: |
+# Just to make it easier to test things locally on macOS (with arm64)
+arch=$(uname -m | sed 's/x86_64/amd64/g' | sed 's/aarch64/arm64/g')
+
+cd /home/nonroot
+wget -q "https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-17/libpq5_17.2-1.pgdg120+1_${arch}.deb"
+wget -q "https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-16/postgresql-client-16_16.6-1.pgdg120+1_${arch}.deb"
+wget -q "https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-16/postgresql-16_16.6-1.pgdg120+1_${arch}.deb"
+dpkg -x libpq5_17.2-1.pgdg120+1_${arch}.deb pg
+dpkg -x postgresql-16_16.6-1.pgdg120+1_${arch}.deb pg
+dpkg -x postgresql-client-16_16.6-1.pgdg120+1_${arch}.deb pg
+
+mkdir -p /tmp/neon/pg_install/v16/bin
+ln -s /home/nonroot/pg/usr/lib/postgresql/16/bin/pgbench /tmp/neon/pg_install/v16/bin/pgbench
+ln -s /home/nonroot/pg/usr/lib/postgresql/16/bin/psql /tmp/neon/pg_install/v16/bin/psql
+ln -s /home/nonroot/pg/usr/lib/$(uname -m)-linux-gnu /tmp/neon/pg_install/v16/lib
+
+LD_LIBRARY_PATH="/home/nonroot/pg/usr/lib/$(uname -m)-linux-gnu:${LD_LIBRARY_PATH:-}"
+export LD_LIBRARY_PATH
+echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> ${GITHUB_ENV}
+
+/tmp/neon/pg_install/v16/bin/pgbench --version
+/tmp/neon/pg_install/v16/bin/psql --version

 - name: Set up Connection String
 id: set-up-connstr
@@ -612,9 +584,6 @@ jobs:
 neonvm-captest-pgvector)
 CONNSTR=${{ secrets.BENCHMARK_PGVECTOR_CONNSTR }}
 ;;
-neonvm-captest-pgvector-pg17)
-CONNSTR=${{ secrets.BENCHMARK_PGVECTOR_CONNSTR_PG17 }}
-;;
 azure-captest-pgvector)
 CONNSTR=${{ secrets.BENCHMARK_PGVECTOR_CONNSTR_AZURE }}
 ;;

@@ -626,6 +595,13 @@ jobs:

 echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

+- name: Configure AWS credentials
+uses: aws-actions/configure-aws-credentials@v4
+with:
+aws-region: eu-central-1
+role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+role-duration-seconds: 18000 # 5 hours
+
 - name: Benchmark pgvector hnsw indexing
 uses: ./.github/actions/run-python-test-set
 with:

@@ -634,8 +610,8 @@ jobs:
 run_in_parallel: false
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
-pg_version: ${{ env.PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+pg_version: ${{ env.DEFAULT_PG_VERSION }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
 PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

@@ -649,8 +625,8 @@ jobs:
 run_in_parallel: false
 save_perf_report: ${{ env.SAVE_PERF_REPORT }}
 extra_params: -m remote_cluster --timeout 21600
-pg_version: ${{ env.PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+pg_version: ${{ env.DEFAULT_PG_VERSION }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
 VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"

@@ -661,7 +637,7 @@ jobs:
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
 with:
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
@@ -696,7 +672,7 @@ jobs:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
||||||
PG_VERSION: ${{ matrix.pg_version }}
|
DEFAULT_PG_VERSION: 16
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
TEST_OLAP_COLLECT_EXPLAIN: ${{ github.event.inputs.collect_olap_explain }}
|
TEST_OLAP_COLLECT_EXPLAIN: ${{ github.event.inputs.collect_olap_explain }}
|
||||||
TEST_OLAP_COLLECT_PG_STAT_STATEMENTS: ${{ github.event.inputs.collect_pg_stat_statements }}
|
TEST_OLAP_COLLECT_PG_STAT_STATEMENTS: ${{ github.event.inputs.collect_pg_stat_statements }}
|
||||||
@@ -732,25 +708,13 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Set up Connection String
|
- name: Set up Connection String
|
||||||
id: set-up-connstr
|
id: set-up-connstr
|
||||||
run: |
|
run: |
|
||||||
case "${PLATFORM}" in
|
case "${PLATFORM}" in
|
||||||
neonvm-captest-reuse)
|
neonvm-captest-reuse)
|
||||||
case "${PG_VERSION}" in
|
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_10M_CONNSTR }}
|
||||||
16)
|
|
||||||
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_10M_CONNSTR }}
|
|
||||||
;;
|
|
||||||
17)
|
|
||||||
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_CONNSTR_PG17 }}
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo >&2 "Unsupported PG_VERSION=${PG_VERSION} for PLATFORM=${PLATFORM}"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
;;
|
;;
|
||||||
rds-aurora)
|
rds-aurora)
|
||||||
CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CLICKBENCH_10M_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CLICKBENCH_10M_CONNSTR }}
|
||||||
@@ -774,8 +738,8 @@ jobs:
|
|||||||
run_in_parallel: false
|
run_in_parallel: false
|
||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 43200 -k test_clickbench
|
extra_params: -m remote_cluster --timeout 43200 -k test_clickbench
|
||||||
pg_version: ${{ env.PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
@@ -789,7 +753,7 @@ jobs:
|
|||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -810,7 +774,7 @@ jobs:
|
|||||||
# We might change it after https://github.com/neondatabase/neon/issues/2900.
|
# We might change it after https://github.com/neondatabase/neon/issues/2900.
|
||||||
#
|
#
|
||||||
# *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
|
# *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
|
||||||
# if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
|
if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
|
||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
statuses: write
|
statuses: write
|
||||||
@@ -823,11 +787,12 @@ jobs:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
||||||
PG_VERSION: ${{ matrix.pg_version }}
|
DEFAULT_PG_VERSION: 16
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUILD_TYPE: remote
|
BUILD_TYPE: remote
|
||||||
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
||||||
PLATFORM: ${{ matrix.platform }}
|
PLATFORM: ${{ matrix.platform }}
|
||||||
|
TEST_OLAP_SCALE: ${{ matrix.scale }}
|
||||||
|
|
||||||
runs-on: [ self-hosted, us-east-2, x64 ]
|
runs-on: [ self-hosted, us-east-2, x64 ]
|
||||||
container:
|
container:
|
||||||
@@ -853,30 +818,18 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Get Connstring Secret Name
|
- name: Get Connstring Secret Name
|
||||||
run: |
|
run: |
|
||||||
case "${PLATFORM}" in
|
case "${PLATFORM}" in
|
||||||
neonvm-captest-reuse)
|
neonvm-captest-reuse)
|
||||||
case "${PG_VERSION}" in
|
ENV_PLATFORM=CAPTEST_TPCH
|
||||||
16)
|
|
||||||
CONNSTR_SECRET_NAME="BENCHMARK_CAPTEST_TPCH_S10_CONNSTR"
|
|
||||||
;;
|
|
||||||
17)
|
|
||||||
CONNSTR_SECRET_NAME="BENCHMARK_CAPTEST_TPCH_CONNSTR_PG17"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo >&2 "Unsupported PG_VERSION=${PG_VERSION} for PLATFORM=${PLATFORM}"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
;;
|
;;
|
||||||
rds-aurora)
|
rds-aurora)
|
||||||
CONNSTR_SECRET_NAME="BENCHMARK_RDS_AURORA_TPCH_S10_CONNSTR"
|
ENV_PLATFORM=RDS_AURORA_TPCH
|
||||||
;;
|
;;
|
||||||
rds-postgres)
|
rds-postgres)
|
||||||
CONNSTR_SECRET_NAME="BENCHMARK_RDS_POSTGRES_TPCH_S10_CONNSTR"
|
ENV_PLATFORM=RDS_POSTGRES_TPCH
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neonvm-captest-reuse', 'rds-aurora', or 'rds-postgres'"
|
echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neonvm-captest-reuse', 'rds-aurora', or 'rds-postgres'"
|
||||||
@@ -884,6 +837,7 @@ jobs:
|
|||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
|
CONNSTR_SECRET_NAME="BENCHMARK_${ENV_PLATFORM}_S${TEST_OLAP_SCALE}_CONNSTR"
|
||||||
echo "CONNSTR_SECRET_NAME=${CONNSTR_SECRET_NAME}" >> $GITHUB_ENV
|
echo "CONNSTR_SECRET_NAME=${CONNSTR_SECRET_NAME}" >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: Set up Connection String
|
- name: Set up Connection String
|
||||||
@@ -901,20 +855,20 @@ jobs:
|
|||||||
run_in_parallel: false
|
run_in_parallel: false
|
||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 21600 -k test_tpch
|
extra_params: -m remote_cluster --timeout 21600 -k test_tpch
|
||||||
pg_version: ${{ env.PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
TEST_OLAP_SCALE: 10
|
TEST_OLAP_SCALE: ${{ matrix.scale }}
|
||||||
|
|
||||||
- name: Create Allure report
|
- name: Create Allure report
|
||||||
id: create-allure-report
|
id: create-allure-report
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -929,7 +883,7 @@ jobs:
|
|||||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
|
|
||||||
user-examples-compare:
|
user-examples-compare:
|
||||||
# if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
|
if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
|
||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
statuses: write
|
statuses: write
|
||||||
@@ -942,7 +896,7 @@ jobs:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
||||||
PG_VERSION: ${{ matrix.pg_version }}
|
DEFAULT_PG_VERSION: 16
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUILD_TYPE: remote
|
BUILD_TYPE: remote
|
||||||
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
||||||
@@ -972,25 +926,13 @@ jobs:
|
|||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||||
path: /tmp/neon/
|
path: /tmp/neon/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Set up Connection String
|
- name: Set up Connection String
|
||||||
id: set-up-connstr
|
id: set-up-connstr
|
||||||
run: |
|
run: |
|
||||||
case "${PLATFORM}" in
|
case "${PLATFORM}" in
|
||||||
neonvm-captest-reuse)
|
neonvm-captest-reuse)
|
||||||
case "${PG_VERSION}" in
|
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_CAPTEST_CONNSTR }}
|
||||||
16)
|
|
||||||
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_CAPTEST_CONNSTR }}
|
|
||||||
;;
|
|
||||||
17)
|
|
||||||
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_USER_EXAMPLE_CONNSTR_PG17 }}
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo >&2 "Unsupported PG_VERSION=${PG_VERSION} for PLATFORM=${PLATFORM}"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
;;
|
;;
|
||||||
rds-aurora)
|
rds-aurora)
|
||||||
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_AURORA_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_AURORA_CONNSTR }}
|
||||||
@@ -1014,8 +956,8 @@ jobs:
|
|||||||
run_in_parallel: false
|
run_in_parallel: false
|
||||||
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
|
extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
|
||||||
pg_version: ${{ env.PG_VERSION }}
|
pg_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
@@ -1026,7 +968,7 @@ jobs:
|
|||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
|
|||||||
241
.github/workflows/build-macos.yml
vendored
241
.github/workflows/build-macos.yml
vendored
@@ -1,241 +0,0 @@
|
|||||||
name: Check neon with MacOS builds
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
pg_versions:
|
|
||||||
description: "Array of the pg versions to build for, for example: ['v14', 'v17']"
|
|
||||||
type: string
|
|
||||||
default: '[]'
|
|
||||||
required: false
|
|
||||||
rebuild_rust_code:
|
|
||||||
description: "Rebuild Rust code"
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
required: false
|
|
||||||
rebuild_everything:
|
|
||||||
description: "If true, rebuild for all versions"
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
required: false
|
|
||||||
|
|
||||||
env:
|
|
||||||
RUST_BACKTRACE: 1
|
|
||||||
COPT: '-Werror'
|
|
||||||
|
|
||||||
# TODO: move `check-*` and `files-changed` jobs to the "Caller" Workflow
|
|
||||||
# We should care about that as Github has limitations:
|
|
||||||
# - You can connect up to four levels of workflows
|
|
||||||
# - You can call a maximum of 20 unique reusable workflows from a single workflow file.
|
|
||||||
# https://docs.github.com/en/actions/sharing-automations/reusing-workflows#limitations
|
|
||||||
jobs:
|
|
||||||
build-pgxn:
|
|
||||||
if: |
|
|
||||||
(inputs.pg_versions != '[]' || inputs.rebuild_everything) && (
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
|
|
||||||
github.ref_name == 'main'
|
|
||||||
)
|
|
||||||
timeout-minutes: 30
|
|
||||||
runs-on: macos-15
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
postgres-version: ${{ inputs.rebuild_everything && fromJson('["v14", "v15", "v16", "v17"]') || fromJSON(inputs.pg_versions) }}
|
|
||||||
env:
|
|
||||||
# Use release build only, to have less debug info around
|
|
||||||
# Hence keeping target/ (and general cache size) smaller
|
|
||||||
BUILD_TYPE: release
|
|
||||||
steps:
|
|
||||||
- name: Checkout main repo
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Set pg ${{ matrix.postgres-version }} for caching
|
|
||||||
id: pg_rev
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-${{ matrix.postgres-version }}) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
|
|
||||||
- name: Cache postgres ${{ matrix.postgres-version }} build
|
|
||||||
id: cache_pg
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/${{ matrix.postgres-version }}
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ matrix.postgres-version }}-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Checkout submodule vendor/postgres-${{ matrix.postgres-version }}
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
git submodule init vendor/postgres-${{ matrix.postgres-version }}
|
|
||||||
git submodule update --depth 1 --recursive
|
|
||||||
|
|
||||||
- name: Install build dependencies
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
brew install flex bison openssl protobuf icu4c
|
|
||||||
|
|
||||||
- name: Set extra env for macOS
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
|
|
||||||
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Build Postgres ${{ matrix.postgres-version }}
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
make postgres-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
- name: Build Neon Pg Ext ${{ matrix.postgres-version }}
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
make "neon-pg-ext-${{ matrix.postgres-version }}" -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
- name: Get postgres headers ${{ matrix.postgres-version }}
|
|
||||||
if: steps.cache_pg.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
make postgres-headers-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
build-walproposer-lib:
|
|
||||||
if: |
|
|
||||||
(inputs.pg_versions != '[]' || inputs.rebuild_everything) && (
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
|
|
||||||
github.ref_name == 'main'
|
|
||||||
)
|
|
||||||
timeout-minutes: 30
|
|
||||||
runs-on: macos-15
|
|
||||||
needs: [build-pgxn]
|
|
||||||
env:
|
|
||||||
# Use release build only, to have less debug info around
|
|
||||||
# Hence keeping target/ (and general cache size) smaller
|
|
||||||
BUILD_TYPE: release
|
|
||||||
steps:
|
|
||||||
- name: Checkout main repo
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Set pg v17 for caching
|
|
||||||
id: pg_rev
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
|
|
||||||
- name: Cache postgres v17 build
|
|
||||||
id: cache_pg
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v17
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Cache walproposer-lib
|
|
||||||
id: cache_walproposer_lib
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/build/walproposer-lib
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Checkout submodule vendor/postgres-v17
|
|
||||||
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
git submodule init vendor/postgres-v17
|
|
||||||
git submodule update --depth 1 --recursive
|
|
||||||
|
|
||||||
- name: Install build dependencies
|
|
||||||
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
brew install flex bison openssl protobuf icu4c
|
|
||||||
|
|
||||||
- name: Set extra env for macOS
|
|
||||||
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
|
|
||||||
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Build walproposer-lib (only for v17)
|
|
||||||
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
|
|
||||||
run:
|
|
||||||
make walproposer-lib -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
cargo-build:
|
|
||||||
if: |
|
|
||||||
(inputs.pg_versions != '[]' || inputs.rebuild_rust_code || inputs.rebuild_everything) && (
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
|
|
||||||
github.ref_name == 'main'
|
|
||||||
)
|
|
||||||
timeout-minutes: 30
|
|
||||||
runs-on: macos-15
|
|
||||||
needs: [build-pgxn, build-walproposer-lib]
|
|
||||||
env:
|
|
||||||
# Use release build only, to have less debug info around
|
|
||||||
# Hence keeping target/ (and general cache size) smaller
|
|
||||||
BUILD_TYPE: release
|
|
||||||
steps:
|
|
||||||
- name: Checkout main repo
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
|
|
||||||
- name: Set pg v14 for caching
|
|
||||||
id: pg_rev_v14
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
- name: Set pg v15 for caching
|
|
||||||
id: pg_rev_v15
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
- name: Set pg v16 for caching
|
|
||||||
id: pg_rev_v16
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v16) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
- name: Set pg v17 for caching
|
|
||||||
id: pg_rev_v17
|
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) | tee -a "${GITHUB_OUTPUT}"
|
|
||||||
|
|
||||||
- name: Cache postgres v14 build
|
|
||||||
id: cache_pg
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v14
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v14-${{ steps.pg_rev_v14.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
- name: Cache postgres v15 build
|
|
||||||
id: cache_pg_v15
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v15
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v15-${{ steps.pg_rev_v15.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
- name: Cache postgres v16 build
|
|
||||||
id: cache_pg_v16
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v16
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v16-${{ steps.pg_rev_v16.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
- name: Cache postgres v17 build
|
|
||||||
id: cache_pg_v17
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/v17
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v17-${{ steps.pg_rev_v17.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Cache cargo deps (only for v17)
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/registry
|
|
||||||
!~/.cargo/registry/src
|
|
||||||
~/.cargo/git
|
|
||||||
target
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
|
|
||||||
|
|
||||||
- name: Cache walproposer-lib
|
|
||||||
id: cache_walproposer_lib
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: pg_install/build/walproposer-lib
|
|
||||||
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev_v17.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Install build dependencies
|
|
||||||
run: |
|
|
||||||
brew install flex bison openssl protobuf icu4c
|
|
||||||
|
|
||||||
- name: Set extra env for macOS
|
|
||||||
run: |
|
|
||||||
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
|
|
||||||
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Run cargo build (only for v17)
|
|
||||||
run: cargo build --all --release -j$(sysctl -n hw.ncpu)
|
|
||||||
|
|
||||||
- name: Check that no warnings are produced (only for v17)
|
|
||||||
run: ./run_clippy.sh
|
|
||||||
506
.github/workflows/build_and_test.yml
vendored
506
.github/workflows/build_and_test.yml
vendored
@@ -21,6 +21,8 @@ concurrency:
|
|||||||
env:
|
env:
|
||||||
RUST_BACKTRACE: 1
|
RUST_BACKTRACE: 1
|
||||||
COPT: '-Werror'
|
COPT: '-Werror'
|
||||||
|
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
|
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
# A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
|
# A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
|
||||||
E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
|
E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
|
||||||
|
|
||||||
@@ -45,26 +47,6 @@ jobs:
|
|||||||
run cancel-previous-in-concurrency-group.yml \
|
run cancel-previous-in-concurrency-group.yml \
|
||||||
--field concurrency_group="${{ env.E2E_CONCURRENCY_GROUP }}"
|
--field concurrency_group="${{ env.E2E_CONCURRENCY_GROUP }}"
|
||||||
|
|
||||||
files-changed:
|
|
||||||
needs: [ check-permissions ]
|
|
||||||
runs-on: [ self-hosted, small ]
|
|
||||||
timeout-minutes: 3
|
|
||||||
outputs:
|
|
||||||
check-rust-dependencies: ${{ steps.files-changed.outputs.rust_dependencies }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
|
|
||||||
- name: Check for file changes
|
|
||||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
|
||||||
id: files-changed
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
filters: .github/file-filters.yaml
|
|
||||||
|
|
||||||
tag:
|
tag:
|
||||||
needs: [ check-permissions ]
|
needs: [ check-permissions ]
|
||||||
runs-on: [ self-hosted, small ]
|
runs-on: [ self-hosted, small ]
|
||||||
@@ -184,19 +166,77 @@ jobs:
|
|||||||
|
|
||||||
check-codestyle-rust:
|
check-codestyle-rust:
|
||||||
needs: [ check-permissions, build-build-tools-image ]
|
needs: [ check-permissions, build-build-tools-image ]
|
||||||
uses: ./.github/workflows/_check-codestyle-rust.yml
|
strategy:
|
||||||
with:
|
matrix:
|
||||||
build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
arch: [ x64, arm64 ]
|
||||||
archs: '["x64", "arm64"]'
|
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', matrix.arch == 'arm64' && 'small-arm64' || 'small')) }}
|
||||||
secrets: inherit
|
|
||||||
|
|
||||||
check-dependencies-rust:
|
container:
|
||||||
needs: [ files-changed, build-build-tools-image ]
|
image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
||||||
if: ${{ needs.files-changed.outputs.check-rust-dependencies == 'true' }}
|
credentials:
|
||||||
uses: ./.github/workflows/cargo-deny.yml
|
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
||||||
with:
|
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
||||||
build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
options: --init
|
||||||
secrets: inherit
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: true
|
||||||
|
|
||||||
|
- name: Cache cargo deps
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/.cargo/registry
|
||||||
|
!~/.cargo/registry/src
|
||||||
|
~/.cargo/git
|
||||||
|
target
|
||||||
|
key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
|
||||||
|
|
||||||
|
# Some of our rust modules use FFI and need those to be checked
|
||||||
|
- name: Get postgres headers
|
||||||
|
run: make postgres-headers -j$(nproc)
|
||||||
|
|
||||||
|
# cargo hack runs the given cargo subcommand (clippy in this case) for all feature combinations.
|
||||||
|
# This will catch compiler & clippy warnings in all feature combinations.
|
||||||
|
# TODO: use cargo hack for build and test as well, but, that's quite expensive.
|
||||||
|
# NB: keep clippy args in sync with ./run_clippy.sh
|
||||||
|
#
|
||||||
|
# The only difference between "clippy --debug" and "clippy --release" is that in --release mode,
|
||||||
|
# #[cfg(debug_assertions)] blocks are not built. It's not worth building everything for second
|
||||||
|
# time just for that, so skip "clippy --release".
|
||||||
|
- run: |
|
||||||
|
CLIPPY_COMMON_ARGS="$( source .neon_clippy_args; echo "$CLIPPY_COMMON_ARGS")"
|
||||||
|
if [ "$CLIPPY_COMMON_ARGS" = "" ]; then
|
||||||
|
echo "No clippy args found in .neon_clippy_args"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
|
||||||
|
- name: Run cargo clippy (debug)
|
||||||
|
run: cargo hack --feature-powerset clippy $CLIPPY_COMMON_ARGS
|
||||||
|
|
||||||
|
- name: Check documentation generation
|
||||||
|
run: cargo doc --workspace --no-deps --document-private-items
|
||||||
|
env:
|
||||||
|
RUSTDOCFLAGS: "-Dwarnings -Arustdoc::private_intra_doc_links"
|
||||||
|
|
||||||
|
# Use `${{ !cancelled() }}` to run quck tests after the longer clippy run
|
||||||
|
- name: Check formatting
|
||||||
|
if: ${{ !cancelled() }}
|
||||||
|
run: cargo fmt --all -- --check
|
||||||
|
|
||||||
|
# https://github.com/facebookincubator/cargo-guppy/tree/bec4e0eb29dcd1faac70b1b5360267fc02bf830e/tools/cargo-hakari#2-keep-the-workspace-hack-up-to-date-in-ci
|
||||||
|
- name: Check rust dependencies
|
||||||
|
if: ${{ !cancelled() }}
|
||||||
|
run: |
|
||||||
|
cargo hakari generate --diff # workspace-hack Cargo.toml is up-to-date
|
||||||
|
cargo hakari manage-deps --dry-run # all workspace crates depend on workspace-hack
|
||||||
|
|
||||||
|
# https://github.com/EmbarkStudios/cargo-deny
|
||||||
|
- name: Check rust licenses/bans/advisories/sources
|
||||||
|
if: ${{ !cancelled() }}
|
||||||
|
run: cargo deny check --hide-inclusion-graph
|
||||||
|
|
||||||
build-and-test-locally:
|
build-and-test-locally:
|
||||||
needs: [ tag, build-build-tools-image ]
|
needs: [ tag, build-build-tools-image ]
|
||||||
@@ -215,15 +255,15 @@ jobs:
|
|||||||
build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
||||||
build-tag: ${{ needs.tag.outputs.build-tag }}
|
build-tag: ${{ needs.tag.outputs.build-tag }}
|
||||||
build-type: ${{ matrix.build-type }}
|
build-type: ${{ matrix.build-type }}
|
||||||
# Run tests on all Postgres versions in release builds and only on the latest version in debug builds.
|
# Run tests on all Postgres versions in release builds and only on the latest version in debug builds
|
||||||
# Run without LFC on v17 release and debug builds only. For all the other cases LFC is enabled.
|
# run without LFC on v17 release only
|
||||||
test-cfg: |
|
test-cfg: |
|
||||||
${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "with-lfc"},
|
${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "without-lfc"},
|
||||||
{"pg_version":"v15", "lfc_state": "with-lfc"},
|
{"pg_version":"v15", "lfc_state": "without-lfc"},
|
||||||
{"pg_version":"v16", "lfc_state": "with-lfc"},
|
{"pg_version":"v16", "lfc_state": "without-lfc"},
|
||||||
{"pg_version":"v17", "lfc_state": "with-lfc"},
|
{"pg_version":"v17", "lfc_state": "without-lfc"},
|
||||||
{"pg_version":"v17", "lfc_state": "without-lfc"}]'
|
{"pg_version":"v17", "lfc_state": "with-lfc"}]'
|
||||||
|| '[{"pg_version":"v17", "lfc_state": "without-lfc" }]' }}
|
|| '[{"pg_version":"v17", "lfc_state": "without-lfc"}]' }}
|
||||||
secrets: inherit
|
secrets: inherit
|
||||||
|
|
||||||
# Keep `benchmarks` job outside of `build-and-test-locally` workflow to make job failures non-blocking
|
# Keep `benchmarks` job outside of `build-and-test-locally` workflow to make job failures non-blocking
|
||||||
@@ -265,12 +305,7 @@ jobs:
|
|||||||
benchmarks:
|
benchmarks:
|
||||||
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
|
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
|
||||||
needs: [ check-permissions, build-and-test-locally, build-build-tools-image, get-benchmarks-durations ]
|
needs: [ check-permissions, build-and-test-locally, build-build-tools-image, get-benchmarks-durations ]
|
||||||
permissions:
|
runs-on: [ self-hosted, small ]
|
||||||
id-token: write # aws-actions/configure-aws-credentials
|
|
||||||
statuses: write
|
|
||||||
contents: write
|
|
||||||
pull-requests: write
|
|
||||||
runs-on: [ self-hosted, small-metal ]
|
|
||||||
container:
|
container:
|
||||||
image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
||||||
credentials:
|
credentials:
|
||||||
@@ -298,7 +333,6 @@ jobs:
|
|||||||
extra_params: --splits 5 --group ${{ matrix.pytest_split_group }}
|
extra_params: --splits 5 --group ${{ matrix.pytest_split_group }}
|
||||||
benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
|
benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
|
||||||
pg_version: v16
|
pg_version: v16
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
@@ -308,31 +342,24 @@ jobs:
|
|||||||
# XXX: no coverage data handling here, since benchmarks are run on release builds,
|
# XXX: no coverage data handling here, since benchmarks are run on release builds,
|
||||||
# while coverage is currently collected for the debug ones
|
# while coverage is currently collected for the debug ones
|
||||||
|
|
||||||
report-benchmarks-results-to-slack:
|
report-benchmarks-failures:
|
||||||
needs: [ benchmarks, create-test-report ]
|
needs: [ benchmarks, create-test-report ]
|
||||||
if: github.ref_name == 'main' && !cancelled() && contains(fromJSON('["success", "failure"]'), needs.benchmarks.result)
|
if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure'
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: slackapi/slack-github-action@v2
|
- uses: slackapi/slack-github-action@v1
|
||||||
with:
|
with:
|
||||||
method: chat.postMessage
|
channel-id: C060CNA47S9 # on-call-staging-storage-stream
|
||||||
token: ${{ secrets.SLACK_BOT_TOKEN }}
|
slack-message: |
|
||||||
payload: |
|
Benchmarks failed on main <${{ github.event.head_commit.url }}|${{ github.sha }}>
|
||||||
channel: "${{ vars.SLACK_ON_CALL_STORAGE_STAGING_STREAM }}"
|
<${{ needs.create-test-report.outputs.report-url }}|Allure report>
|
||||||
text: |
|
env:
|
||||||
Benchmarks on main: *${{ needs.benchmarks.result }}*
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
- <${{ needs.create-test-report.outputs.report-url }}|Allure report>
|
|
||||||
- <${{ github.event.head_commit.url }}|${{ github.sha }}>
|
|
||||||
|
|
||||||
create-test-report:
|
create-test-report:
|
||||||
needs: [ check-permissions, build-and-test-locally, coverage-report, build-build-tools-image, benchmarks ]
|
needs: [ check-permissions, build-and-test-locally, coverage-report, build-build-tools-image, benchmarks ]
|
||||||
if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
|
if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
|
||||||
permissions:
|
|
||||||
id-token: write # aws-actions/configure-aws-credentials
|
|
||||||
statuses: write
|
|
||||||
contents: write
|
|
||||||
pull-requests: write
|
|
||||||
outputs:
|
outputs:
|
||||||
report-url: ${{ steps.create-allure-report.outputs.report-url }}
|
report-url: ${{ steps.create-allure-report.outputs.report-url }}
|
||||||
|
|
||||||
@@ -353,7 +380,6 @@ jobs:
|
|||||||
uses: ./.github/actions/allure-report-generate
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
store-test-results-into-db: true
|
store-test-results-into-db: true
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
env:
|
env:
|
||||||
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
|
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
|
||||||
|
|
||||||
@@ -385,10 +411,6 @@ jobs:
|
|||||||
coverage-report:
|
coverage-report:
|
||||||
if: ${{ !startsWith(github.ref_name, 'release') }}
|
if: ${{ !startsWith(github.ref_name, 'release') }}
|
||||||
needs: [ check-permissions, build-build-tools-image, build-and-test-locally ]
|
needs: [ check-permissions, build-build-tools-image, build-and-test-locally ]
|
||||||
permissions:
|
|
||||||
id-token: write # aws-actions/configure-aws-credentials
|
|
||||||
statuses: write
|
|
||||||
contents: write
|
|
||||||
runs-on: [ self-hosted, small ]
|
runs-on: [ self-hosted, small ]
|
||||||
container:
|
container:
|
||||||
image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
|
||||||
@@ -415,14 +437,12 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact
|
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact
|
||||||
path: /tmp/neon
|
path: /tmp/neon
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Get coverage artifact
|
- name: Get coverage artifact
|
||||||
uses: ./.github/actions/download
|
uses: ./.github/actions/download
|
||||||
with:
|
with:
|
||||||
name: coverage-data-artifact
|
name: coverage-data-artifact
|
||||||
path: /tmp/coverage
|
path: /tmp/coverage
|
||||||
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
|
||||||
|
|
||||||
- name: Merge coverage data
|
- name: Merge coverage data
|
||||||
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
|
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
|
||||||
@@ -497,7 +517,7 @@ jobs:
|
|||||||
|
|
||||||
trigger-e2e-tests:
|
trigger-e2e-tests:
|
||||||
if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'run-e2e-tests-in-draft') || github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute' }}
|
if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'run-e2e-tests-in-draft') || github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute' }}
|
||||||
needs: [ check-permissions, promote-images-dev, tag ]
|
needs: [ check-permissions, promote-images, tag ]
|
||||||
uses: ./.github/workflows/trigger-e2e-tests.yml
|
uses: ./.github/workflows/trigger-e2e-tests.yml
|
||||||
secrets: inherit
|
secrets: inherit
|
||||||
|
|
||||||
@@ -553,10 +573,6 @@ jobs:
|
|||||||
neon-image:
|
neon-image:
|
||||||
needs: [ neon-image-arch, tag ]
|
needs: [ neon-image-arch, tag ]
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
permissions:
|
|
||||||
id-token: write # aws-actions/configure-aws-credentials
|
|
||||||
statuses: write
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: docker/login-action@v3
|
- uses: docker/login-action@v3
|
||||||
@@ -571,15 +587,11 @@ jobs:
|
|||||||
neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-x64 \
|
neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-x64 \
|
||||||
neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-arm64
|
neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-arm64
|
||||||
|
|
||||||
- name: Configure AWS credentials
|
- uses: docker/login-action@v3
|
||||||
uses: aws-actions/configure-aws-credentials@v4
|
|
||||||
with:
|
with:
|
||||||
aws-region: eu-central-1
|
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
role-duration-seconds: 3600
|
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
|
|
||||||
- name: Login to Amazon Dev ECR
|
|
||||||
uses: aws-actions/amazon-ecr-login@v2
|
|
||||||
|
|
||||||
- name: Push multi-arch image to ECR
|
- name: Push multi-arch image to ECR
|
||||||
run: |
|
run: |
|
||||||
@@ -588,10 +600,6 @@ jobs:
|
|||||||
|
|
||||||
compute-node-image-arch:
|
compute-node-image-arch:
|
||||||
needs: [ check-permissions, build-build-tools-image, tag ]
|
needs: [ check-permissions, build-build-tools-image, tag ]
|
||||||
permissions:
|
|
||||||
id-token: write # aws-actions/configure-aws-credentials
|
|
||||||
statuses: write
|
|
||||||
contents: read
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
@@ -632,15 +640,11 @@ jobs:
|
|||||||
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
||||||
|
|
||||||
- name: Configure AWS credentials
|
- uses: docker/login-action@v3
|
||||||
uses: aws-actions/configure-aws-credentials@v4
|
|
||||||
with:
|
with:
|
||||||
aws-region: eu-central-1
|
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
role-duration-seconds: 3600
|
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
|
|
||||||
- name: Login to Amazon Dev ECR
|
|
||||||
uses: aws-actions/amazon-ecr-login@v2
|
|
||||||
|
|
||||||
- uses: docker/login-action@v3
|
- uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
@@ -682,17 +686,37 @@ jobs:
|
|||||||
push: true
|
push: true
|
||||||
pull: true
|
pull: true
|
||||||
file: compute/compute-node.Dockerfile
|
file: compute/compute-node.Dockerfile
|
||||||
target: extension-tests
|
target: neon-pg-ext-test
|
||||||
cache-from: type=registry,ref=cache.neon.build/compute-node-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
|
cache-from: type=registry,ref=cache.neon.build/compute-node-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
|
||||||
tags: |
|
tags: |
|
||||||
neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{needs.tag.outputs.build-tag}}-${{ matrix.version.debian }}-${{ matrix.arch }}
|
neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{needs.tag.outputs.build-tag}}-${{ matrix.version.debian }}-${{ matrix.arch }}
|
||||||
|
|
||||||
|
- name: Build compute-tools image
|
||||||
|
# compute-tools are Postgres independent, so build it only once
|
||||||
|
# We pick 16, because that builds on debian 11 with older glibc (and is
|
||||||
|
# thus compatible with newer glibc), rather than 17 on Debian 12, as
|
||||||
|
# that isn't guaranteed to be compatible with Debian 11
|
||||||
|
if: matrix.version.pg == 'v16'
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
target: compute-tools-image
|
||||||
|
context: .
|
||||||
|
build-args: |
|
||||||
|
GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
BUILD_TAG=${{ needs.tag.outputs.build-tag }}
|
||||||
|
TAG=${{ needs.build-build-tools-image.outputs.image-tag }}-${{ matrix.version.debian }}
|
||||||
|
DEBIAN_VERSION=${{ matrix.version.debian }}
|
||||||
|
provenance: false
|
||||||
|
push: true
|
||||||
|
pull: true
|
||||||
|
file: compute/compute-node.Dockerfile
|
||||||
|
cache-from: type=registry,ref=cache.neon.build/compute-node-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
|
||||||
|
cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=cache.neon.build/compute-tools-{0}:cache-{1}-{2},mode=max', matrix.version.pg, matrix.version.debian, matrix.arch) || '' }}
|
||||||
|
tags: |
|
||||||
|
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-${{ matrix.arch }}
|
||||||
|
|
||||||
compute-node-image:
|
compute-node-image:
|
||||||
needs: [ compute-node-image-arch, tag ]
|
needs: [ compute-node-image-arch, tag ]
|
||||||
permissions:
|
|
||||||
id-token: write # aws-actions/configure-aws-credentials
|
|
||||||
statuses: write
|
|
||||||
contents: read
|
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
@@ -729,21 +753,31 @@ jobs:
|
|||||||
neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-x64 \
|
neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-x64 \
|
||||||
neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64
|
neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64
|
||||||
|
|
||||||
- name: Configure AWS credentials
|
- name: Create multi-arch compute-tools image
|
||||||
uses: aws-actions/configure-aws-credentials@v4
|
if: matrix.version.pg == 'v16'
|
||||||
with:
|
run: |
|
||||||
aws-region: eu-central-1
|
docker buildx imagetools create -t neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }} \
|
||||||
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
-t neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }} \
|
||||||
role-duration-seconds: 3600
|
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-x64 \
|
||||||
|
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64
|
||||||
|
|
||||||
- name: Login to Amazon Dev ECR
|
- uses: docker/login-action@v3
|
||||||
uses: aws-actions/amazon-ecr-login@v2
|
with:
|
||||||
|
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
|
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
|
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
|
|
||||||
- name: Push multi-arch compute-node-${{ matrix.version.pg }} image to ECR
|
- name: Push multi-arch compute-node-${{ matrix.version.pg }} image to ECR
|
||||||
run: |
|
run: |
|
||||||
docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }} \
|
docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }} \
|
||||||
neondatabase/compute-node-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}
|
neondatabase/compute-node-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}
|
||||||
|
|
||||||
|
- name: Push multi-arch compute-tools image to ECR
|
||||||
|
if: matrix.version.pg == 'v16'
|
||||||
|
run: |
|
||||||
|
docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{ needs.tag.outputs.build-tag }} \
|
||||||
|
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}
|
||||||
|
|
||||||
vm-compute-node-image:
|
vm-compute-node-image:
|
||||||
needs: [ check-permissions, tag, compute-node-image ]
|
needs: [ check-permissions, tag, compute-node-image ]
|
||||||
runs-on: [ self-hosted, large ]
|
runs-on: [ self-hosted, large ]
|
||||||
@@ -761,7 +795,7 @@ jobs:
|
|||||||
- pg: v17
|
- pg: v17
|
||||||
debian: bookworm
|
debian: bookworm
|
||||||
env:
|
env:
|
||||||
VM_BUILDER_VERSION: v0.37.1
|
VM_BUILDER_VERSION: v0.35.0
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
@@ -814,17 +848,6 @@ jobs:
|
|||||||
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
||||||
|
|
||||||
- name: Get the last compute release tag
|
|
||||||
id: get-last-compute-release-tag
|
|
||||||
env:
|
|
||||||
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
|
|
||||||
run: |
|
|
||||||
tag=$(gh api -q '[.[].tag_name | select(startswith("release-compute"))][0]'\
|
|
||||||
-H "Accept: application/vnd.github+json" \
|
|
||||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
|
||||||
"/repos/${{ github.repository }}/releases")
|
|
||||||
echo tag=${tag} >> ${GITHUB_OUTPUT}
|
|
||||||
|
|
||||||
# `neondatabase/neon` contains multiple binaries, all of them use the same input for the version into the same version formatting library.
|
# `neondatabase/neon` contains multiple binaries, all of them use the same input for the version into the same version formatting library.
|
||||||
# Pick pageserver as currently the only binary with extra "version" features printed in the string to verify.
|
# Pick pageserver as currently the only binary with extra "version" features printed in the string to verify.
|
||||||
# Regular pageserver version string looks like
|
# Regular pageserver version string looks like
|
||||||
@@ -856,34 +879,18 @@ jobs:
|
|||||||
TEST_VERSION_ONLY: ${{ matrix.pg_version }}
|
TEST_VERSION_ONLY: ${{ matrix.pg_version }}
|
||||||
run: ./docker-compose/docker_compose_test.sh
|
run: ./docker-compose/docker_compose_test.sh
|
||||||
|
|
||||||
- name: Print logs and clean up docker-compose test
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
docker compose --profile test-extensions -f ./docker-compose/docker-compose.yml logs || true
|
|
||||||
docker compose --profile test-extensions -f ./docker-compose/docker-compose.yml down
|
|
||||||
|
|
||||||
- name: Test extension upgrade
|
|
||||||
timeout-minutes: 20
|
|
||||||
if: ${{ needs.tag.outputs.build-tag == github.run_id }}
|
|
||||||
env:
|
|
||||||
NEWTAG: ${{ needs.tag.outputs.build-tag }}
|
|
||||||
OLDTAG: ${{ steps.get-last-compute-release-tag.outputs.tag }}
|
|
||||||
run: ./docker-compose/test_extensions_upgrade.sh
|
|
||||||
|
|
||||||
- name: Print logs and clean up
|
- name: Print logs and clean up
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
docker compose --profile test-extensions -f ./docker-compose/docker-compose.yml logs || true
|
docker compose -f ./docker-compose/docker-compose.yml logs || 0
|
||||||
docker compose --profile test-extensions -f ./docker-compose/docker-compose.yml down
|
docker compose -f ./docker-compose/docker-compose.yml down
|
||||||
|
|
||||||
promote-images-dev:
|
promote-images:
|
||||||
needs: [ check-permissions, tag, vm-compute-node-image, neon-image ]
|
 needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
 runs-on: ubuntu-22.04

 permissions:
-id-token: write # aws-actions/configure-aws-credentials
-statuses: write
-contents: read
+id-token: write # for `aws-actions/configure-aws-credentials`

 env:
 VERSIONS: v14 v15 v16 v17
@@ -894,15 +901,12 @@ jobs:
 username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
 password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

-- name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v4
+- name: Login to dev ECR
+uses: docker/login-action@v3
 with:
-aws-region: eu-central-1
-role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-role-duration-seconds: 3600
+registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+password: ${{ secrets.AWS_SECRET_KEY_DEV }}

-- name: Login to Amazon Dev ECR
-uses: aws-actions/amazon-ecr-login@v2
-
 - name: Copy vm-compute-node images to ECR
 run: |
@@ -911,35 +915,6 @@ jobs:
 neondatabase/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }}
 done

-promote-images-prod:
-needs: [ check-permissions, tag, test-images, promote-images-dev ]
-runs-on: ubuntu-22.04
-if: github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-
-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-statuses: write
-contents: read
-
-env:
-VERSIONS: v14 v15 v16 v17
-
-steps:
-- name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-role-duration-seconds: 3600
-
-- name: Login to Amazon Dev ECR
-uses: aws-actions/amazon-ecr-login@v2
-
-- uses: docker/login-action@v3
-with:
-username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
-password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
-
 - name: Add latest tag to images
 if: github.ref_name == 'main'
 run: |
@@ -947,6 +922,9 @@ jobs:
 docker buildx imagetools create -t $repo/neon:latest \
 $repo/neon:${{ needs.tag.outputs.build-tag }}

+docker buildx imagetools create -t $repo/compute-tools:latest \
+$repo/compute-tools:${{ needs.tag.outputs.build-tag }}
+
 for version in ${VERSIONS}; do
 docker buildx imagetools create -t $repo/compute-node-${version}:latest \
 $repo/compute-node-${version}:${{ needs.tag.outputs.build-tag }}
@@ -975,31 +953,31 @@ jobs:
 - name: Copy all images to prod ECR
 if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
 run: |
-for image in neon {vm-,}compute-node-{v14,v15,v16,v17}; do
+for image in neon compute-tools {vm-,}compute-node-{v14,v15,v16,v17}; do
 docker buildx imagetools create -t 093970136003.dkr.ecr.eu-central-1.amazonaws.com/${image}:${{ needs.tag.outputs.build-tag }} \
 369495373322.dkr.ecr.eu-central-1.amazonaws.com/${image}:${{ needs.tag.outputs.build-tag }}
 done

 push-to-acr-dev:
 if: github.ref_name == 'main'
-needs: [ tag, promote-images-dev ]
+needs: [ tag, promote-images ]
 uses: ./.github/workflows/_push-to-acr.yml
 with:
 client_id: ${{ vars.AZURE_DEV_CLIENT_ID }}
 image_tag: ${{ needs.tag.outputs.build-tag }}
-images: neon vm-compute-node-v14 vm-compute-node-v15 vm-compute-node-v16 vm-compute-node-v17 compute-node-v14 compute-node-v15 compute-node-v16 compute-node-v17
+images: neon compute-tools vm-compute-node-v14 vm-compute-node-v15 vm-compute-node-v16 vm-compute-node-v17 compute-node-v14 compute-node-v15 compute-node-v16 compute-node-v17
 registry_name: ${{ vars.AZURE_DEV_REGISTRY_NAME }}
 subscription_id: ${{ vars.AZURE_DEV_SUBSCRIPTION_ID }}
 tenant_id: ${{ vars.AZURE_TENANT_ID }}

 push-to-acr-prod:
 if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-needs: [ tag, promote-images-prod ]
+needs: [ tag, promote-images ]
 uses: ./.github/workflows/_push-to-acr.yml
 with:
 client_id: ${{ vars.AZURE_PROD_CLIENT_ID }}
 image_tag: ${{ needs.tag.outputs.build-tag }}
-images: neon vm-compute-node-v14 vm-compute-node-v15 vm-compute-node-v16 vm-compute-node-v17 compute-node-v14 compute-node-v15 compute-node-v16 compute-node-v17
+images: neon compute-tools vm-compute-node-v14 vm-compute-node-v15 vm-compute-node-v16 vm-compute-node-v17 compute-node-v14 compute-node-v15 compute-node-v16 compute-node-v17
 registry_name: ${{ vars.AZURE_PROD_REGISTRY_NAME }}
 subscription_id: ${{ vars.AZURE_PROD_SUBSCRIPTION_ID }}
 tenant_id: ${{ vars.AZURE_TENANT_ID }}
@@ -1007,11 +985,6 @@ jobs:
 trigger-custom-extensions-build-and-wait:
 needs: [ check-permissions, tag ]
 runs-on: ubuntu-22.04
-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-statuses: write
-contents: write
-pull-requests: write
 steps:
 - name: Set PR's status to pending and request a remote CI test
 run: |
@@ -1084,114 +1057,15 @@ jobs:
 exit 1

 deploy:
-needs: [ check-permissions, promote-images-prod, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
+needs: [ check-permissions, promote-images, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
 # `!failure() && !cancelled()` is required because the workflow depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
 if: (github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute') && !failure() && !cancelled()
-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-statuses: write
-contents: write
 runs-on: [ self-hosted, small ]
 container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
 steps:
 - uses: actions/checkout@v4

-- name: Create git tag and GitHub release
-if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
-uses: actions/github-script@v7
-with:
-retries: 5
-script: |
-const tag = "${{ needs.tag.outputs.build-tag }}";
-const branch = "${{ github.ref_name }}";
-
-try {
-const existingRef = await github.rest.git.getRef({
-owner: context.repo.owner,
-repo: context.repo.repo,
-ref: `tags/${tag}`,
-});
-
-if (existingRef.data.object.sha !== context.sha) {
-throw new Error(`Tag ${tag} already exists but points to a different commit (expected: ${context.sha}, actual: ${existingRef.data.object.sha}).`);
-}
-
-console.log(`Tag ${tag} already exists and points to ${context.sha} as expected.`);
-} catch (error) {
-if (error.status !== 404) {
-throw error;
-}
-
-console.log(`Tag ${tag} does not exist. Creating it...`);
-await github.rest.git.createRef({
-owner: context.repo.owner,
-repo: context.repo.repo,
-ref: `refs/tags/${tag}`,
-sha: context.sha,
-});
-console.log(`Tag ${tag} created successfully.`);
-}
-
-try {
-const existingRelease = await github.rest.repos.getReleaseByTag({
-owner: context.repo.owner,
-repo: context.repo.repo,
-tag: tag,
-});
-
-console.log(`Release for tag ${tag} already exists (ID: ${existingRelease.data.id}).`);
-} catch (error) {
-if (error.status !== 404) {
-throw error;
-}
-
-console.log(`Release for tag ${tag} does not exist. Creating it...`);
-
-// Find the PR number using the commit SHA
-const pullRequests = await github.rest.pulls.list({
-owner: context.repo.owner,
-repo: context.repo.repo,
-state: 'closed',
-base: branch,
-});
-
-const pr = pullRequests.data.find(pr => pr.merge_commit_sha === context.sha);
-const prNumber = pr ? pr.number : null;
-
-// Find the previous release on the branch
-const releases = await github.rest.repos.listReleases({
-owner: context.repo.owner,
-repo: context.repo.repo,
-per_page: 100,
-});
-
-const branchReleases = releases.data
-.filter((release) => {
-const regex = new RegExp(`^${branch}-\\d+$`);
-return regex.test(release.tag_name) && !release.draft && !release.prerelease;
-})
-.sort((a, b) => new Date(b.created_at) - new Date(a.created_at));
-
-const previousTag = branchReleases.length > 0 ? branchReleases[0].tag_name : null;
-
-const releaseNotes = [
-prNumber
-? `Release PR https://github.com/${context.repo.owner}/${context.repo.repo}/pull/${prNumber}.`
-: 'Release PR not found.',
-previousTag
-? `Diff with the previous release https://github.com/${context.repo.owner}/${context.repo.repo}/compare/${previousTag}...${tag}.`
-: `No previous release found on branch ${branch}.`,
-].join('\n\n');
-
-await github.rest.repos.createRelease({
-owner: context.repo.owner,
-repo: context.repo.repo,
-tag_name: tag,
-body: releaseNotes,
-});
-console.log(`Release for tag ${tag} created successfully.`);
-}
-
 - name: Trigger deploy workflow
 env:
 GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
@@ -1241,13 +1115,38 @@ jobs:
 exit 1
 fi

+- name: Create git tag
+if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
+uses: actions/github-script@v7
+with:
+# Retry script for 5XX server errors: https://github.com/actions/github-script#retries
+retries: 5
+script: |
+await github.rest.git.createRef({
+owner: context.repo.owner,
+repo: context.repo.repo,
+ref: "refs/tags/${{ needs.tag.outputs.build-tag }}",
+sha: context.sha,
+})
+
+# TODO: check how GitHub releases looks for proxy releases and enable it if it's ok
+- name: Create GitHub release
+if: github.ref_name == 'release'
+uses: actions/github-script@v7
+with:
+# Retry script for 5XX server errors: https://github.com/actions/github-script#retries
+retries: 5
+script: |
+await github.rest.repos.createRelease({
+owner: context.repo.owner,
+repo: context.repo.repo,
+tag_name: "${{ needs.tag.outputs.build-tag }}",
+generate_release_notes: true,
+})
+
 # The job runs on `release` branch and copies compatibility data and Neon artifact from the last *release PR* to the latest directory
 promote-compatibility-data:
 needs: [ deploy ]
-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-statuses: write
-contents: read
 # `!failure() && !cancelled()` is required because the workflow transitively depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
 if: github.ref_name == 'release' && !failure() && !cancelled()

@@ -1284,12 +1183,6 @@ jobs:
 echo "run-id=${run_id}" | tee -a ${GITHUB_OUTPUT}
 echo "commit-sha=${last_commit_sha}" | tee -a ${GITHUB_OUTPUT}

-- uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-role-duration-seconds: 3600
-
 - name: Promote compatibility snapshot and Neon artifact
 env:
 BUCKET: neon-github-public-dev
@@ -1337,7 +1230,7 @@ jobs:
 done

 pin-build-tools-image:
-needs: [ build-build-tools-image, promote-images-prod, build-and-test-locally ]
+needs: [ build-build-tools-image, promote-images, build-and-test-locally ]
 if: github.ref_name == 'main'
 uses: ./.github/workflows/pin-build-tools-image.yml
 with:
@@ -1360,9 +1253,7 @@ jobs:
 - build-and-test-locally
 - check-codestyle-python
 - check-codestyle-rust
-- check-dependencies-rust
-- files-changed
-- promote-images-dev
+- promote-images
 - test-images
 - trigger-custom-extensions-build-and-wait
 runs-on: ubuntu-22.04
@@ -1374,11 +1265,4 @@ jobs:
 if: |
 contains(needs.*.result, 'failure')
 || contains(needs.*.result, 'cancelled')
-|| (needs.check-dependencies-rust.result == 'skipped' && needs.files-changed.outputs.check-rust-dependencies == 'true')
-|| needs.build-and-test-locally.result == 'skipped'
-|| needs.check-codestyle-python.result == 'skipped'
-|| needs.check-codestyle-rust.result == 'skipped'
-|| needs.files-changed.result == 'skipped'
-|| needs.promote-images-dev.result == 'skipped'
-|| needs.test-images.result == 'skipped'
-|| needs.trigger-custom-extensions-build-and-wait.result == 'skipped'
+|| contains(needs.*.result, 'skipped')
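
A note on the recurring credentials change in the hunks above: the left-hand side of this compare logs in to the dev ECR registry by assuming an OIDC role (hence the `permissions: id-token: write` entries that disappear on the right), while the right-hand side falls back to the static `AWS_ACCESS_KEY_DEV` / `AWS_SECRET_KEY_DEV` secrets with `docker/login-action`. For reference, a minimal sketch of the OIDC variant, built only from values that appear in this diff:

  permissions:
    id-token: write  # lets aws-actions/configure-aws-credentials request an OIDC token
  steps:
    - name: Configure AWS credentials
      uses: aws-actions/configure-aws-credentials@v4
      with:
        aws-region: eu-central-1
        role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        role-duration-seconds: 3600
    - name: Login to Amazon Dev ECR
      uses: aws-actions/amazon-ecr-login@v2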

.github/workflows/cargo-deny.yml (vendored, 57 changed lines)
@@ -1,57 +0,0 @@
-name: cargo deny checks
-
-on:
-workflow_call:
-inputs:
-build-tools-image:
-required: false
-type: string
-schedule:
-- cron: '0 0 * * *'
-
-jobs:
-cargo-deny:
-strategy:
-matrix:
-ref: >-
-${{
-fromJSON(
-github.event_name == 'schedule'
-&& '["main","release","release-proxy","release-compute"]'
-|| format('["{0}"]', github.sha)
-)
-}}
-
-runs-on: [self-hosted, small]
-
-container:
-image: ${{ inputs.build-tools-image || 'neondatabase/build-tools:pinned' }}
-credentials:
-username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
-password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
-options: --init
-
-steps:
-- name: Checkout
-uses: actions/checkout@v4
-with:
-ref: ${{ matrix.ref }}
-
-- name: Check rust licenses/bans/advisories/sources
-env:
-CARGO_DENY_TARGET: >-
-${{ github.event_name == 'schedule' && 'advisories' || 'all' }}
-run: cargo deny check --hide-inclusion-graph $CARGO_DENY_TARGET
-
-- name: Post to a Slack channel
-if: ${{ github.event_name == 'schedule' && failure() }}
-uses: slackapi/slack-github-action@v2
-with:
-method: chat.postMessage
-token: ${{ secrets.SLACK_BOT_TOKEN }}
-payload: |
-channel: ${{ vars.SLACK_CICD_CHANNEL_ID }}
-text: |
-Periodic cargo-deny on ${{ matrix.ref }}: ${{ job.status }}
-<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>
-Pinging @oncall-devprod.
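
The deleted workflow above is a thin wrapper around cargo-deny; the same checks can be reproduced locally, roughly:

  cargo install cargo-deny --locked
  # what the scheduled run checks (advisories only)
  cargo deny check --hide-inclusion-graph advisories
  # what a workflow_call run checks ('all': licenses, bans, advisories, sources)
  cargo deny check --hide-inclusion-graph all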

.github/workflows/cloud-regress.yml (vendored, 46 changed lines)
@@ -19,21 +19,15 @@ concurrency:
 group: ${{ github.workflow }}
 cancel-in-progress: true

-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-statuses: write
-contents: write
-
 jobs:
 regress:
 env:
 POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+DEFAULT_PG_VERSION: 16
 TEST_OUTPUT: /tmp/test_output
 BUILD_TYPE: remote
-strategy:
-fail-fast: false
-matrix:
-pg-version: [16, 17]
+AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}

 runs-on: us-east-2
 container:
@@ -46,11 +40,9 @@ jobs:
 submodules: true

 - name: Patch the test
-env:
-PG_VERSION: ${{matrix.pg-version}}
 run: |
-cd "vendor/postgres-v${PG_VERSION}"
-patch -p1 < "../../compute/patches/cloud_regress_pg${PG_VERSION}.patch"
+cd "vendor/postgres-v${DEFAULT_PG_VERSION}"
+patch -p1 < "../../compute/patches/cloud_regress_pg${DEFAULT_PG_VERSION}.patch"

 - name: Generate a random password
 id: pwgen
@@ -63,9 +55,8 @@ jobs:
 - name: Change tests according to the generated password
 env:
 DBPASS: ${{ steps.pwgen.outputs.DBPASS }}
-PG_VERSION: ${{matrix.pg-version}}
 run: |
-cd vendor/postgres-v"${PG_VERSION}"/src/test/regress
+cd vendor/postgres-v"${DEFAULT_PG_VERSION}"/src/test/regress
 for fname in sql/*.sql expected/*.out; do
 sed -i.bak s/NEON_PASSWORD_PLACEHOLDER/"'${DBPASS}'"/ "${fname}"
 done
@@ -81,46 +72,27 @@ jobs:
 name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
 path: /tmp/neon/
 prefix: latest
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

-- name: Create a new branch
-id: create-branch
-uses: ./.github/actions/neon-branch-create
-with:
-api_key: ${{ secrets.NEON_STAGING_API_KEY }}
-project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
-
 - name: Run the regression tests
 uses: ./.github/actions/run-python-test-set
 with:
 build_type: ${{ env.BUILD_TYPE }}
 test_selection: cloud_regress
-pg_version: ${{matrix.pg-version}}
+pg_version: ${{ env.DEFAULT_PG_VERSION }}
 extra_params: -m remote_cluster
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
-BENCHMARK_CONNSTR: ${{steps.create-branch.outputs.dsn}}
+BENCHMARK_CONNSTR: ${{ secrets.PG_REGRESS_CONNSTR }}

-- name: Delete branch
-if: always()
-uses: ./.github/actions/neon-branch-delete
-with:
-api_key: ${{ secrets.NEON_STAGING_API_KEY }}
-project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
-branch_id: ${{steps.create-branch.outputs.branch_id}}
-
 - name: Create Allure report
 id: create-allure-report
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
-with:
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
 uses: slackapi/slack-github-action@v1
 with:
-channel-id: ${{ vars.SLACK_ON_CALL_QA_STAGING_STREAM }}
+channel-id: "C033QLM5P7D" # on-call-staging-stream
 slack-message: |
 Periodic pg_regress on staging: ${{ job.status }}
 <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>

.github/workflows/ingest_benchmark.yml (vendored, 46 changed lines)
@@ -13,7 +13,7 @@ on:
 # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
 - cron: '0 9 * * *' # run once a day, timezone is utc
 workflow_dispatch: # adds ability to run this manually

 defaults:
 run:
 shell: bash -euxo pipefail {0}
@@ -28,24 +28,7 @@ jobs:
 strategy:
 fail-fast: false # allow other variants to continue even if one fails
 matrix:
-include:
-- target_project: new_empty_project_stripe_size_2048
-stripe_size: 2048 # 16 MiB
-postgres_version: 16
-- target_project: new_empty_project_stripe_size_32768
-stripe_size: 32768 # 256 MiB # note that this is different from null because using null will shard_split the project only if it reaches the threshold
-# while here it is sharded from the beginning with a shard size of 256 MiB
-postgres_version: 16
-- target_project: new_empty_project
-stripe_size: null # run with neon defaults which will shard split only when reaching the threshold
-postgres_version: 16
-- target_project: new_empty_project
-stripe_size: null # run with neon defaults which will shard split only when reaching the threshold
-postgres_version: 17
-- target_project: large_existing_project
-stripe_size: null # cannot re-shared or choose different stripe size for existing, already sharded project
-postgres_version: 16
-max-parallel: 1 # we want to run each stripe size sequentially to be able to compare the results
+target_project: [new_empty_project, large_existing_project]
 permissions:
 contents: write
 statuses: write
@@ -73,7 +56,7 @@ jobs:
 with:
 aws-region: eu-central-1
 role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role

 - name: Download Neon artifact
 uses: ./.github/actions/download
@@ -81,24 +64,19 @@ jobs:
 name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
 path: /tmp/neon/
 prefix: latest
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Create Neon Project
-if: ${{ startsWith(matrix.target_project, 'new_empty_project') }}
+if: ${{ matrix.target_project == 'new_empty_project' }}
 id: create-neon-project-ingest-target
 uses: ./.github/actions/neon-project-create
 with:
 region_id: aws-us-east-2
-postgres_version: ${{ matrix.postgres_version }}
+postgres_version: 16
 compute_units: '[7, 7]' # we want to test large compute here to avoid compute-side bottleneck
 api_key: ${{ secrets.NEON_STAGING_API_KEY }}
-shard_split_project: ${{ matrix.stripe_size != null && 'true' || 'false' }}
-admin_api_key: ${{ secrets.NEON_STAGING_ADMIN_API_KEY }}
-shard_count: 8
-stripe_size: ${{ matrix.stripe_size }}

 - name: Initialize Neon project
-if: ${{ startsWith(matrix.target_project, 'new_empty_project') }}
+if: ${{ matrix.target_project == 'new_empty_project' }}
 env:
 BENCHMARK_INGEST_TARGET_CONNSTR: ${{ steps.create-neon-project-ingest-target.outputs.dsn }}
 NEW_PROJECT_ID: ${{ steps.create-neon-project-ingest-target.outputs.project_id }}
@@ -116,7 +94,7 @@ jobs:
 project_id: ${{ vars.BENCHMARK_INGEST_TARGET_PROJECTID }}
 api_key: ${{ secrets.NEON_STAGING_API_KEY }}

 - name: Initialize Neon project
 if: ${{ matrix.target_project == 'large_existing_project' }}
 env:
 BENCHMARK_INGEST_TARGET_CONNSTR: ${{ steps.create-neon-branch-ingest-target.outputs.dsn }}
@@ -144,16 +122,16 @@ jobs:
 ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"
 echo "BENCHMARK_INGEST_TARGET_CONNSTR=${BENCHMARK_INGEST_TARGET_CONNSTR}" >> $GITHUB_ENV

 - name: Invoke pgcopydb
 uses: ./.github/actions/run-python-test-set
 with:
 build_type: remote
 test_selection: performance/test_perf_ingest_using_pgcopydb.py
 run_in_parallel: false
 extra_params: -s -m remote_cluster --timeout 86400 -k test_ingest_performance_using_pgcopydb
-pg_version: v${{ matrix.postgres_version }}
+pg_version: v16
 save_perf_report: true
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
+aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_INGEST_SOURCE_CONNSTR: ${{ secrets.BENCHMARK_INGEST_SOURCE_CONNSTR }}
 TARGET_PROJECT_TYPE: ${{ matrix.target_project }}
@@ -165,9 +143,9 @@ jobs:
 run: |
 export LD_LIBRARY_PATH=${PG_16_LIB_PATH}
 ${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "\dt+"

 - name: Delete Neon Project
-if: ${{ always() && startsWith(matrix.target_project, 'new_empty_project') }}
+if: ${{ always() && matrix.target_project == 'new_empty_project' }}
 uses: ./.github/actions/neon-project-delete
 with:
 project_id: ${{ steps.create-neon-project-ingest-target.outputs.project_id }}
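
On the matrix change at the top of this file: a matrix written with `include:` lets every variant carry its own extra keys (here `stripe_size` and `postgres_version`), which the plain `target_project: [...]` list on the right cannot express. A trimmed sketch of the richer form, using entries taken from the diff:

  strategy:
    fail-fast: false
    matrix:
      include:
        - target_project: new_empty_project
          stripe_size: null
          postgres_version: 17
        - target_project: large_existing_project
          stripe_size: null
          postgres_version: 16
    max-parallel: 1  # run variants one at a time so results stay comparable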

.github/workflows/neon_extra_builds.yml (vendored, 154 changed lines)
@@ -31,15 +31,19 @@ jobs:
 uses: ./.github/workflows/build-build-tools-image.yml
 secrets: inherit

-files-changed:
-name: Detect what files changed
-runs-on: ubuntu-22.04
-timeout-minutes: 3
-outputs:
-v17: ${{ steps.files_changed.outputs.v17 }}
-postgres_changes: ${{ steps.postgres_changes.outputs.changes }}
-rebuild_rust_code: ${{ steps.files_changed.outputs.rust_code }}
-rebuild_everything: ${{ steps.files_changed.outputs.rebuild_neon_extra || steps.files_changed.outputs.rebuild_macos }}
+check-macos-build:
+needs: [ check-permissions ]
+if: |
+contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
+contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
+github.ref_name == 'main'
+timeout-minutes: 90
+runs-on: macos-15
+
+env:
+# Use release build only, to have less debug info around
+# Hence keeping target/ (and general cache size) smaller
+BUILD_TYPE: release

 steps:
 - name: Checkout
@@ -47,45 +51,102 @@ jobs:
 with:
 submodules: true

-- name: Check for Postgres changes
-uses: dorny/paths-filter@1441771bbfdd59dcd748680ee64ebd8faab1a242 #v3
-id: files_changed
+- name: Install macOS postgres dependencies
+run: brew install flex bison openssl protobuf icu4c
+
+- name: Set pg 14 revision for caching
+id: pg_v14_rev
+run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
+
+- name: Set pg 15 revision for caching
+id: pg_v15_rev
+run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
+
+- name: Set pg 16 revision for caching
+id: pg_v16_rev
+run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v16) >> $GITHUB_OUTPUT
+
+- name: Set pg 17 revision for caching
+id: pg_v17_rev
+run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) >> $GITHUB_OUTPUT
+
+- name: Cache postgres v14 build
+id: cache_pg_14
+uses: actions/cache@v4
 with:
-token: ${{ github.token }}
-filters: .github/file-filters.yaml
-base: ${{ github.event_name != 'pull_request' && (github.event.merge_group.base_ref || github.ref_name) || '' }}
-ref: ${{ github.event_name != 'pull_request' && (github.event.merge_group.head_ref || github.ref) || '' }}
+path: pg_install/v14
+key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

-- name: Filter out only v-string for build matrix
-id: postgres_changes
+- name: Cache postgres v15 build
+id: cache_pg_15
+uses: actions/cache@v4
+with:
+path: pg_install/v15
+key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
+
+- name: Cache postgres v16 build
+id: cache_pg_16
+uses: actions/cache@v4
+with:
+path: pg_install/v16
+key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
+
+- name: Cache postgres v17 build
+id: cache_pg_17
+uses: actions/cache@v4
+with:
+path: pg_install/v17
+key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v17_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
+
+- name: Set extra env for macOS
 run: |
-v_strings_only_as_json_array=$(echo ${{ steps.files_changed.outputs.chnages }} | jq '.[]|select(test("v\\d+"))' | jq --slurp -c)
-echo "changes=${v_strings_only_as_json_array}" | tee -a "${GITHUB_OUTPUT}"
+echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
+echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV

-check-macos-build:
-needs: [ check-permissions, files-changed ]
-if: |
-contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
-contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
-github.ref_name == 'main'
-uses: ./.github/workflows/build-macos.yml
-with:
-pg_versions: ${{ needs.files-changed.outputs.postgres_changes }}
-rebuild_rust_code: ${{ needs.files-changed.outputs.rebuild_rust_code }}
-rebuild_everything: ${{ fromJson(needs.files-changed.outputs.rebuild_everything) }}
+- name: Cache cargo deps
+uses: actions/cache@v4
+with:
+path: |
+~/.cargo/registry
+!~/.cargo/registry/src
+~/.cargo/git
+target
+key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
+
+- name: Build postgres v14
+if: steps.cache_pg_14.outputs.cache-hit != 'true'
+run: make postgres-v14 -j$(sysctl -n hw.ncpu)
+
+- name: Build postgres v15
+if: steps.cache_pg_15.outputs.cache-hit != 'true'
+run: make postgres-v15 -j$(sysctl -n hw.ncpu)
+
+- name: Build postgres v16
+if: steps.cache_pg_16.outputs.cache-hit != 'true'
+run: make postgres-v16 -j$(sysctl -n hw.ncpu)
+
+- name: Build postgres v17
+if: steps.cache_pg_17.outputs.cache-hit != 'true'
+run: make postgres-v17 -j$(sysctl -n hw.ncpu)
+
+- name: Build neon extensions
+run: make neon-pg-ext -j$(sysctl -n hw.ncpu)
+
+- name: Build walproposer-lib
+run: make walproposer-lib -j$(sysctl -n hw.ncpu)
+
+- name: Run cargo build
+run: PQ_LIB_DIR=$(pwd)/pg_install/v16/lib cargo build --all --release
+
+- name: Check that no warnings are produced
+run: ./run_clippy.sh

 gather-rust-build-stats:
-needs: [ check-permissions, build-build-tools-image, files-changed ]
-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-statuses: write
-contents: write
+needs: [ check-permissions, build-build-tools-image ]
 if: |
-(needs.files-changed.outputs.v17 == 'true' || needs.files-changed.outputs.rebuild_everything == 'true') && (
 contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
 contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
 github.ref_name == 'main'
-)
 runs-on: [ self-hosted, large ]
 container:
 image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
@@ -114,20 +175,15 @@ jobs:
 run: make walproposer-lib -j$(nproc)

 - name: Produce the build stats
-run: cargo build --all --release --timings -j$(nproc)
+run: PQ_LIB_DIR=$(pwd)/pg_install/v17/lib cargo build --all --release --timings -j$(nproc)

-- name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-role-duration-seconds: 3600
-
 - name: Upload the build stats
 id: upload-stats
 env:
 BUCKET: neon-github-public-dev
 SHA: ${{ github.event.pull_request.head.sha || github.sha }}
+AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
 run: |
 REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
 aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"

.github/workflows/periodic_pagebench.yml (vendored, 27 changed lines)
@@ -27,11 +27,6 @@ concurrency:

 jobs:
 trigger_bench_on_ec2_machine_in_eu_central_1:
-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-statuses: write
-contents: write
-pull-requests: write
 runs-on: [ self-hosted, small ]
 container:
 image: neondatabase/build-tools:pinned-bookworm
@@ -43,6 +38,8 @@ jobs:
 env:
 API_KEY: ${{ secrets.PERIODIC_PAGEBENCH_EC2_RUNNER_API_KEY }}
 RUN_ID: ${{ github.run_id }}
+AWS_ACCESS_KEY_ID: ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_ID }}
+AWS_SECRET_ACCESS_KEY : ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_SECRET }}
 AWS_DEFAULT_REGION : "eu-central-1"
 AWS_INSTANCE_ID : "i-02a59a3bf86bc7e74"
 steps:
@@ -53,13 +50,6 @@ jobs:
 - name: Show my own (github runner) external IP address - usefull for IP allowlisting
 run: curl https://ifconfig.me

-- name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
-role-duration-seconds: 3600
-
 - name: Start EC2 instance and wait for the instance to boot up
 run: |
 aws ec2 start-instances --instance-ids $AWS_INSTANCE_ID
@@ -134,10 +124,11 @@ jobs:
 cat "test_log_${GITHUB_RUN_ID}"

 - name: Create Allure report
+env:
+AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
 if: ${{ !cancelled() }}
 uses: ./.github/actions/allure-report-generate
-with:
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Post to a Slack channel
 if: ${{ github.event.schedule && failure() }}
@@ -157,14 +148,6 @@ jobs:
 -H "Authorization: Bearer $API_KEY" \
 -d ''

-- name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
-if: always() && steps.poll_step.outputs.too_many_runs != 'true'
-uses: aws-actions/configure-aws-credentials@v4
-with:
-aws-region: eu-central-1
-role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
-role-duration-seconds: 3600
-
 - name: Stop EC2 instance and wait for the instance to be stopped
 if: always() && steps.poll_step.outputs.too_many_runs != 'true'
 run: |

.github/workflows/pg-clients.yml (vendored, 18 changed lines)
@@ -12,8 +12,8 @@ on:
 pull_request:
 paths:
 - '.github/workflows/pg-clients.yml'
-- 'test_runner/pg_clients/**/*.py'
-- 'test_runner/logical_repl/**/*.py'
+- 'test_runner/pg_clients/**'
+- 'test_runner/logical_repl/**'
 - 'poetry.lock'
 workflow_dispatch:

@@ -25,13 +25,11 @@ defaults:
 run:
 shell: bash -euxo pipefail {0}

-permissions:
-id-token: write # aws-actions/configure-aws-credentials
-statuses: write # require for posting a status update
-
 env:
 DEFAULT_PG_VERSION: 16
 PLATFORM: neon-captest-new
+AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
 AWS_DEFAULT_REGION: eu-central-1

 jobs:
@@ -96,7 +94,6 @@ jobs:
 name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
 path: /tmp/neon/
 prefix: latest
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Create Neon Project
 id: create-neon-project
@@ -104,8 +101,6 @@ jobs:
 with:
 api_key: ${{ secrets.NEON_STAGING_API_KEY }}
 postgres_version: ${{ env.DEFAULT_PG_VERSION }}
-project_settings: >-
-{"enable_logical_replication": true}

 - name: Run tests
 uses: ./.github/actions/run-python-test-set
@@ -115,7 +110,6 @@ jobs:
 run_in_parallel: false
 extra_params: -m remote_cluster
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}

@@ -132,7 +126,6 @@ jobs:
 uses: ./.github/actions/allure-report-generate
 with:
 store-test-results-into-db: true
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

@@ -166,7 +159,6 @@ jobs:
 name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
 path: /tmp/neon/
 prefix: latest
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

 - name: Create Neon Project
 id: create-neon-project
@@ -183,7 +175,6 @@ jobs:
 run_in_parallel: false
 extra_params: -m remote_cluster
 pg_version: ${{ env.DEFAULT_PG_VERSION }}
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}

@@ -200,7 +191,6 @@ jobs:
 uses: ./.github/actions/allure-report-generate
 with:
 store-test-results-into-db: true
-aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
 env:
 REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}

.github/workflows/pin-build-tools-image.yml (vendored, 14 changed lines)
@@ -67,7 +67,7 @@ jobs:
 runs-on: ubuntu-22.04

 permissions:
-id-token: write # for `azure/login` and aws auth
+id-token: write # for `azure/login`

 steps:
 - uses: docker/login-action@v3
@@ -75,15 +75,11 @@ jobs:
 username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
 password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}

-- name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v4
+- uses: docker/login-action@v3
 with:
-aws-region: eu-central-1
-role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
-role-duration-seconds: 3600
+registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
+username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+password: ${{ secrets.AWS_SECRET_KEY_DEV }}

-- name: Login to Amazon Dev ECR
-uses: aws-actions/amazon-ecr-login@v2
-
 - name: Azure login
 uses: azure/login@6c251865b4e6290e7b78be643ea2d005bc51f69a # @v2.1.1

.github/workflows/pre-merge-checks.yml (vendored, 46 changed lines)
@@ -1,12 +1,6 @@
 name: Pre-merge checks

 on:
-pull_request:
-paths:
-- .github/workflows/_check-codestyle-python.yml
-- .github/workflows/_check-codestyle-rust.yml
-- .github/workflows/build-build-tools-image.yml
-- .github/workflows/pre-merge-checks.yml
 merge_group:
 branches:
 - main
@@ -23,10 +17,8 @@ jobs:
 runs-on: ubuntu-22.04
 outputs:
 python-changed: ${{ steps.python-src.outputs.any_changed }}
-rust-changed: ${{ steps.rust-src.outputs.any_changed }}
 steps:
 - uses: actions/checkout@v4
-
 - uses: tj-actions/changed-files@4edd678ac3f81e2dc578756871e4d00c19191daf # v45.0.4
 id: python-src
 with:
@@ -38,31 +30,14 @@ jobs:
 poetry.lock
 pyproject.toml

-- uses: tj-actions/changed-files@4edd678ac3f81e2dc578756871e4d00c19191daf # v45.0.4
-id: rust-src
-with:
-files: |
-.github/workflows/_check-codestyle-rust.yml
-.github/workflows/build-build-tools-image.yml
-.github/workflows/pre-merge-checks.yml
-**/**.rs
-**/Cargo.toml
-Cargo.toml
-Cargo.lock
-
 - name: PRINT ALL CHANGED FILES FOR DEBUG PURPOSES
 env:
 PYTHON_CHANGED_FILES: ${{ steps.python-src.outputs.all_changed_files }}
-RUST_CHANGED_FILES: ${{ steps.rust-src.outputs.all_changed_files }}
 run: |
 echo "${PYTHON_CHANGED_FILES}"
-echo "${RUST_CHANGED_FILES}"

 build-build-tools-image:
-if: |
-false
-|| needs.get-changed-files.outputs.python-changed == 'true'
-|| needs.get-changed-files.outputs.rust-changed == 'true'
+if: needs.get-changed-files.outputs.python-changed == 'true'
 needs: [ get-changed-files ]
 uses: ./.github/workflows/build-build-tools-image.yml
 with:
@@ -80,30 +55,17 @@ jobs:
 build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm-x64
 secrets: inherit

-check-codestyle-rust:
-if: needs.get-changed-files.outputs.rust-changed == 'true'
-needs: [ get-changed-files, build-build-tools-image ]
-uses: ./.github/workflows/_check-codestyle-rust.yml
-with:
-# `-bookworm-x64` suffix should match the combination in `build-build-tools-image`
-build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm-x64
-archs: '["x64"]'
-secrets: inherit
-
 # To get items from the merge queue merged into main we need to satisfy "Status checks that are required".
 # Currently we require 2 jobs (checks with exact name):
 # - conclusion
 # - neon-cloud-e2e
 conclusion:
-# Do not run job on Pull Requests as it interferes with the `conclusion` job from the `build_and_test` workflow
-if: always() && github.event_name == 'merge_group'
+if: always()
 permissions:
 statuses: write # for `github.repos.createCommitStatus(...)`
-contents: write
 needs:
 - get-changed-files
 - check-codestyle-python
-- check-codestyle-rust
 runs-on: ubuntu-22.04
 steps:
 - name: Create fake `neon-cloud-e2e` check
@@ -128,8 +90,6 @@ jobs:
 - name: Fail the job if any of the dependencies do not succeed or skipped
 run: exit 1
 if: |
-false
-|| (needs.check-codestyle-python.result == 'skipped' && needs.get-changed-files.outputs.python-changed == 'true')
-|| (needs.check-codestyle-rust.result == 'skipped' && needs.get-changed-files.outputs.rust-changed == 'true')
+(contains(needs.check-codestyle-python.result, 'skipped') && needs.get-changed-files.outputs.python-changed == 'true')
 || contains(needs.*.result, 'failure')
 || contains(needs.*.result, 'cancelled')

.github/workflows/release.yml (vendored, 7 changed lines)
@@ -3,9 +3,8 @@ name: Create Release Branch
 on:
 schedule:
 # It should be kept in sync with if-condition in jobs
+- cron: '0 6 * * MON' # Storage release
 - cron: '0 6 * * THU' # Proxy release
-- cron: '0 6 * * FRI' # Storage release
-- cron: '0 7 * * FRI' # Compute release
 workflow_dispatch:
 inputs:
 create-storage-release-branch:
@@ -30,7 +29,7 @@ defaults:

 jobs:
 create-storage-release-branch:
-if: ${{ github.event.schedule == '0 6 * * FRI' || inputs.create-storage-release-branch }}
+if: ${{ github.event.schedule == '0 6 * * MON' || inputs.create-storage-release-branch }}

 permissions:
 contents: write
@@ -56,7 +55,7 @@ jobs:
 ci-access-token: ${{ secrets.CI_ACCESS_TOKEN }}

 create-compute-release-branch:
-if: ${{ github.event.schedule == '0 7 * * FRI' || inputs.create-compute-release-branch }}
+if: inputs.create-compute-release-branch

 permissions:
 contents: write
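
The cron changes above lean on the fact that a workflow may declare several `schedule` entries and each job can test which one fired through `github.event.schedule`; a minimal sketch of the pattern as this file uses it:

  on:
    schedule:
      - cron: '0 6 * * MON'  # Storage release
      - cron: '0 6 * * THU'  # Proxy release
  jobs:
    create-storage-release-branch:
      if: ${{ github.event.schedule == '0 6 * * MON' || inputs.create-storage-release-branch }}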
8 .github/workflows/trigger-e2e-tests.yml (vendored)

@@ -68,7 +68,7 @@ jobs:
       GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
       TAG: ${{ needs.tag.outputs.build-tag }}
     steps:
-      - name: Wait for `promote-images-dev` job to finish
+      - name: Wait for `promote-images` job to finish
         # It's important to have a timeout here, the script in the step can run infinitely
         timeout-minutes: 60
         run: |
@@ -79,17 +79,17 @@
           # For PRs we use the run id as the tag
           BUILD_AND_TEST_RUN_ID=${TAG}
           while true; do
-            conclusion=$(gh run --repo ${GITHUB_REPOSITORY} view ${BUILD_AND_TEST_RUN_ID} --json jobs --jq '.jobs[] | select(.name == "promote-images-dev") | .conclusion')
+            conclusion=$(gh run --repo ${GITHUB_REPOSITORY} view ${BUILD_AND_TEST_RUN_ID} --json jobs --jq '.jobs[] | select(.name == "promote-images") | .conclusion')
             case "$conclusion" in
               success)
                 break
                 ;;
               failure | cancelled | skipped)
-                echo "The 'promote-images-dev' job didn't succeed: '${conclusion}'. Exiting..."
+                echo "The 'promote-images' job didn't succeed: '${conclusion}'. Exiting..."
                 exit 1
                 ;;
               *)
-                echo "The 'promote-images-dev' hasn't succeed yet. Waiting..."
+                echo "The 'promote-images' hasn't succeed yet. Waiting..."
                 sleep 60
                 ;;
             esac

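For reference, the renamed wait step on the new side condenses to roughly the step below. This is a sketch, not the full workflow: `TAG` and `GH_TOKEN` come from the job's `env`, and the polling body is trimmed to its essentials.

```yaml
- name: Wait for `promote-images` job to finish
  timeout-minutes: 60  # the polling loop below would otherwise run forever
  run: |
    BUILD_AND_TEST_RUN_ID=${TAG}
    while true; do
      conclusion=$(gh run --repo ${GITHUB_REPOSITORY} view ${BUILD_AND_TEST_RUN_ID} --json jobs \
        --jq '.jobs[] | select(.name == "promote-images") | .conclusion')
      case "$conclusion" in
        success) break ;;
        failure | cancelled | skipped) echo "promote-images finished with '${conclusion}'"; exit 1 ;;
        *) sleep 60 ;;
      esac
    done
```
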
33 CODEOWNERS

@@ -1,29 +1,16 @@
-# Autoscaling
-/libs/vm_monitor/ @neondatabase/autoscaling
-
-# DevProd
 /.github/ @neondatabase/developer-productivity
+/compute_tools/ @neondatabase/control-plane @neondatabase/compute
-# Compute
+/libs/pageserver_api/ @neondatabase/storage
-/pgxn/ @neondatabase/compute
+/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
-/vendor/ @neondatabase/compute
-/compute/ @neondatabase/compute
-/compute_tools/ @neondatabase/compute
-
-# Proxy
 /libs/proxy/ @neondatabase/proxy
-/proxy/ @neondatabase/proxy
+/libs/remote_storage/ @neondatabase/storage
+/libs/safekeeper_api/ @neondatabase/storage
-# Storage
+/libs/vm_monitor/ @neondatabase/autoscaling
 /pageserver/ @neondatabase/storage
+/pgxn/ @neondatabase/compute
+/pgxn/neon/ @neondatabase/compute @neondatabase/storage
+/proxy/ @neondatabase/proxy
 /safekeeper/ @neondatabase/storage
 /storage_controller @neondatabase/storage
 /storage_scrubber @neondatabase/storage
-/libs/pageserver_api/ @neondatabase/storage
+/vendor/ @neondatabase/compute
-/libs/remote_storage/ @neondatabase/storage
-/libs/safekeeper_api/ @neondatabase/storage
-
-# Shared
-/pgxn/neon/ @neondatabase/compute @neondatabase/storage
-/libs/compute_api/ @neondatabase/compute @neondatabase/control-plane
-/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage

1314 Cargo.lock (generated)
File diff suppressed because it is too large.

47 Cargo.toml

@@ -11,7 +11,6 @@ members = [
     "pageserver/pagebench",
     "proxy",
     "safekeeper",
-    "safekeeper/client",
     "storage_broker",
     "storage_controller",
     "storage_controller/client",
@@ -52,9 +51,11 @@ anyhow = { version = "1.0", features = ["backtrace"] }
 arc-swap = "1.6"
 async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] }
 atomic-take = "1.1.0"
-backtrace = "0.3.74"
+azure_core = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
+azure_identity = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
+azure_storage = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
+azure_storage_blobs = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
 flate2 = "1.0.26"
-assert-json-diff = "2"
 async-stream = "0.3"
 async-trait = "0.1"
 aws-config = { version = "1.5", default-features = false, features=["rustls", "sso"] }
@@ -66,7 +67,7 @@ aws-smithy-types = "1.2"
 aws-credential-types = "1.2.0"
 aws-sigv4 = { version = "1.2", features = ["sign-http"] }
 aws-types = "1.3"
-axum = { version = "0.8.1", features = ["ws"] }
+axum = { version = "0.7.5", features = ["ws"] }
 base64 = "0.13.0"
 bincode = "1.3"
 bindgen = "0.70"
@@ -78,10 +79,10 @@ camino = "1.1.6"
 cfg-if = "1.0.0"
 chrono = { version = "0.4", default-features = false, features = ["clock"] }
 clap = { version = "4.0", features = ["derive", "env"] }
-clashmap = { version = "1.0", features = ["raw-api"] }
 comfy-table = "7.1"
 const_format = "0.2"
 crc32c = "0.6"
+dashmap = { version = "5.5.0", features = ["raw-api"] }
 diatomic-waker = { version = "0.2.3" }
 either = "1.8"
 enum-map = "2.4.2"
@@ -111,7 +112,6 @@ hyper-util = "0.1"
 tokio-tungstenite = "0.21.0"
 indexmap = "2"
 indoc = "2"
-inferno = "0.12.0"
 ipnet = "2.10.0"
 itertools = "0.10"
 itoa = "1.0.11"
@@ -124,20 +124,20 @@ measured = { version = "0.0.22", features=["lasso"] }
 measured-process = { version = "0.0.22" }
 memoffset = "0.9"
 nix = { version = "0.27", features = ["dir", "fs", "process", "socket", "signal", "poll"] }
-notify = "8.0.0"
+notify = "6.0.0"
 num_cpus = "1.15"
 num-traits = "0.2.15"
 once_cell = "1.13"
-opentelemetry = "0.27"
+opentelemetry = "0.26"
-opentelemetry_sdk = "0.27"
+opentelemetry_sdk = "0.26"
-opentelemetry-otlp = { version = "0.27", default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] }
+opentelemetry-otlp = { version = "0.26", default-features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
-opentelemetry-semantic-conventions = "0.27"
+opentelemetry-semantic-conventions = "0.26"
 parking_lot = "0.12"
 parquet = { version = "53", default-features = false, features = ["zstd"] }
 parquet_derive = "53"
 pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
 pin-project-lite = "0.2"
-pprof = { version = "0.14", features = ["criterion", "flamegraph", "frame-pointer", "protobuf", "protobuf-codec"] }
+pprof = { version = "0.14", features = ["criterion", "flamegraph", "protobuf", "protobuf-codec"] }
 procfs = "0.16"
 prometheus = {version = "0.13", default-features=false, features = ["process"]} # removes protobuf dependency
 prost = "0.13"
@@ -145,7 +145,7 @@ rand = "0.8"
 redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] }
 regex = "1.10.2"
 reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] }
-reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_27"] }
+reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_26"] }
 reqwest-middleware = "0.4"
 reqwest-retry = "0.7"
 routerify = "3"
@@ -178,7 +178,7 @@ test-context = "0.3"
 thiserror = "1.0"
 tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms"] }
 tikv-jemalloc-ctl = { version = "0.6", features = ["stats"] }
-tokio = { version = "1.41", features = ["macros"] }
+tokio = { version = "1.17", features = ["macros"] }
 tokio-epoll-uring = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" }
 tokio-io-timeout = "1.2.0"
 tokio-postgres-rustls = "0.12.0"
@@ -188,15 +188,11 @@ tokio-tar = "0.3"
 tokio-util = { version = "0.7.10", features = ["io", "rt"] }
 toml = "0.8"
 toml_edit = "0.22"
-tonic = {version = "0.12.3", default-features = false, features = ["channel", "tls", "tls-roots"]}
+tonic = {version = "0.12.3", features = ["tls", "tls-roots"]}
-tower = { version = "0.5.2", default-features = false }
+tower-service = "0.3.2"
-tower-http = { version = "0.6.2", features = ["request-id", "trace"] }
-tower-service = "0.3.3"
 tracing = "0.1"
 tracing-error = "0.2"
-tracing-log = "0.2"
+tracing-opentelemetry = "0.27"
-tracing-opentelemetry = "0.28"
-tracing-serde = "0.2.0"
 tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
 try-lock = "0.2.5"
 twox-hash = { version = "1.6.3", default-features = false }
@@ -220,12 +216,6 @@ postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git",
 postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
 tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
-
-## Azure SDK crates
-azure_core = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
-azure_identity = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
-azure_storage = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
-azure_storage_blobs = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }

 ## Local libraries
 compute_api = { version = "0.1", path = "./libs/compute_api/" }
 consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
@@ -241,7 +231,6 @@ postgres_initdb = { path = "./libs/postgres_initdb" }
 pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
 remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
 safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
-safekeeper_client = { path = "./safekeeper/client" }
 desim = { version = "0.1", path = "./libs/desim" }
 storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
 storage_controller_client = { path = "./storage_controller/client" }
@@ -272,8 +261,6 @@ tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", br
 [profile.release]
 # This is useful for profiling and, to some extent, debug.
 # Besides, debug info should not affect the performance.
-#
-# NB: we also enable frame pointers for improved profiling, see .cargo/config.toml.
 debug = true

 # disable debug symbols for all packages except this one to decrease binaries size

12 Dockerfile

@@ -45,7 +45,7 @@ COPY --chown=nonroot . .

 ARG ADDITIONAL_RUSTFLAGS
 RUN set -e \
-    && RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment -Cforce-frame-pointers=yes ${ADDITIONAL_RUSTFLAGS}" cargo build \
+    && PQ_LIB_DIR=$(pwd)/pg_install/v${STABLE_PG_VERSION}/lib RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment ${ADDITIONAL_RUSTFLAGS}" cargo build \
       --bin pg_sni_router \
       --bin pageserver \
       --bin pagectl \
@@ -64,16 +64,11 @@ ARG DEFAULT_PG_VERSION
 WORKDIR /data

 RUN set -e \
-    && echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/80-retries \
     && apt update \
     && apt install -y \
         libreadline-dev \
         libseccomp-dev \
         ca-certificates \
-        # System postgres for use with client libraries (e.g. in storage controller)
-        postgresql-15 \
-        openssl \
-    && rm -f /etc/apt/apt.conf.d/80-retries \
     && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
     && useradd -d /data neon \
     && chown -R neon:neon /data
@@ -106,6 +101,11 @@ RUN mkdir -p /data/.neon/ && \
     > /data/.neon/pageserver.toml && \
     chown -R neon:neon /data/.neon

+# When running a binary that links with libpq, default to using our most recent postgres version. Binaries
+# that want a particular postgres version will select it explicitly: this is just a default.
+ENV LD_LIBRARY_PATH=/usr/local/v${DEFAULT_PG_VERSION}/lib
+
+
 VOLUME ["/data"]
 USER neon
 EXPOSE 6400

5 Makefile

@@ -3,6 +3,7 @@ ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
 # Where to install Postgres, default is ./pg_install, maybe useful for package managers
 POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/

+OPENSSL_PREFIX_DIR := /usr/local/openssl
 ICU_PREFIX_DIR := /usr/local/icu

 #
@@ -25,9 +26,11 @@ endif
 ifeq ($(shell test -e /home/nonroot/.docker_build && echo -n yes),yes)
   # Exclude static build openssl, icu for local build (MacOS, Linux)
   # Only keep for build type release and debug
+  PG_CFLAGS += -I$(OPENSSL_PREFIX_DIR)/include
   PG_CONFIGURE_OPTS += --with-icu
   PG_CONFIGURE_OPTS += ICU_CFLAGS='-I/$(ICU_PREFIX_DIR)/include -DU_STATIC_IMPLEMENTATION'
   PG_CONFIGURE_OPTS += ICU_LIBS='-L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -licui18n -licuuc -licudata -lstdc++ -Wl,-Bdynamic -lm'
+  PG_CONFIGURE_OPTS += LDFLAGS='-L$(OPENSSL_PREFIX_DIR)/lib -L$(OPENSSL_PREFIX_DIR)/lib64 -L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -Wl,-Bstatic -lssl -lcrypto -Wl,-Bdynamic -lrt -lm -ldl -lpthread'
 endif

 UNAME_S := $(shell uname -s)
@@ -64,6 +67,8 @@ CARGO_BUILD_FLAGS += $(filter -j1,$(MAKEFLAGS))
 CARGO_CMD_PREFIX += $(if $(filter n,$(MAKEFLAGS)),,+)
 # Force cargo not to print progress bar
 CARGO_CMD_PREFIX += CARGO_TERM_PROGRESS_WHEN=never CI=1
+# Set PQ_LIB_DIR to make sure `storage_controller` get linked with bundled libpq (through diesel)
+CARGO_CMD_PREFIX += PQ_LIB_DIR=$(POSTGRES_INSTALL_DIR)/v16/lib

 CACHEDIR_TAG_CONTENTS := "Signature: 8a477f597d28d172789f06886806bc55"

@@ -21,10 +21,8 @@ The Neon storage engine consists of two major components:

 See developer documentation in [SUMMARY.md](/docs/SUMMARY.md) for more information.

-## Running a local development environment
+## Running local installation

-Neon can be run on a workstation for small experiments and to test code changes, by
-following these instructions.

 #### Installing dependencies on Linux
 1. Install build dependencies and other applicable packages
@@ -240,7 +238,7 @@ postgres=# select * from t;
 > cargo neon stop
 ```

-More advanced usages can be found at [Local Development Control Plane (`neon_local`))](./control_plane/README.md).
+More advanced usages can be found at [Control Plane and Neon Local](./control_plane/README.md).

 #### Handling build failures

@@ -3,15 +3,6 @@ ARG DEBIAN_VERSION=bookworm
|
|||||||
FROM debian:bookworm-slim AS pgcopydb_builder
|
FROM debian:bookworm-slim AS pgcopydb_builder
|
||||||
ARG DEBIAN_VERSION
|
ARG DEBIAN_VERSION
|
||||||
|
|
||||||
# Use strict mode for bash to catch errors early
|
|
||||||
SHELL ["/bin/bash", "-euo", "pipefail", "-c"]
|
|
||||||
|
|
||||||
# By default, /bin/sh used in debian images will treat '\n' as eol,
|
|
||||||
# but as we use bash as SHELL, and built-in echo in bash requires '-e' flag for that.
|
|
||||||
RUN echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/80-retries && \
|
|
||||||
echo -e "retry_connrefused = on\ntimeout=15\ntries=5\n" > /root/.wgetrc && \
|
|
||||||
echo -e "--retry-connrefused\n--connect-timeout 15\n--retry 5\n--max-time 300\n" > /root/.curlrc
|
|
||||||
|
|
||||||
RUN if [ "${DEBIAN_VERSION}" = "bookworm" ]; then \
|
RUN if [ "${DEBIAN_VERSION}" = "bookworm" ]; then \
|
||||||
set -e && \
|
set -e && \
|
||||||
apt update && \
|
apt update && \
|
||||||
@@ -60,8 +51,7 @@ ARG DEBIAN_VERSION
|
|||||||
|
|
||||||
# Add nonroot user
|
# Add nonroot user
|
||||||
RUN useradd -ms /bin/bash nonroot -b /home
|
RUN useradd -ms /bin/bash nonroot -b /home
|
||||||
# Use strict mode for bash to catch errors early
|
SHELL ["/bin/bash", "-c"]
|
||||||
SHELL ["/bin/bash", "-euo", "pipefail", "-c"]
|
|
||||||
|
|
||||||
RUN mkdir -p /pgcopydb/bin && \
|
RUN mkdir -p /pgcopydb/bin && \
|
||||||
mkdir -p /pgcopydb/lib && \
|
mkdir -p /pgcopydb/lib && \
|
||||||
@@ -71,10 +61,6 @@ RUN mkdir -p /pgcopydb/bin && \
|
|||||||
COPY --from=pgcopydb_builder /usr/lib/postgresql/16/bin/pgcopydb /pgcopydb/bin/pgcopydb
|
COPY --from=pgcopydb_builder /usr/lib/postgresql/16/bin/pgcopydb /pgcopydb/bin/pgcopydb
|
||||||
COPY --from=pgcopydb_builder /pgcopydb/lib/libpq.so.5 /pgcopydb/lib/libpq.so.5
|
COPY --from=pgcopydb_builder /pgcopydb/lib/libpq.so.5 /pgcopydb/lib/libpq.so.5
|
||||||
|
|
||||||
RUN echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/80-retries && \
|
|
||||||
echo -e "retry_connrefused = on\ntimeout=15\ntries=5\n" > /root/.wgetrc && \
|
|
||||||
echo -e "--retry-connrefused\n--connect-timeout 15\n--retry 5\n--max-time 300\n" > /root/.curlrc
|
|
||||||
|
|
||||||
# System deps
|
# System deps
|
||||||
#
|
#
|
||||||
# 'gdb' is included so that we get backtraces of core dumps produced in
|
# 'gdb' is included so that we get backtraces of core dumps produced in
|
||||||
@@ -129,7 +115,7 @@ RUN set -e \
|
|||||||
|
|
||||||
# Keep the version the same as in compute/compute-node.Dockerfile and
|
# Keep the version the same as in compute/compute-node.Dockerfile and
|
||||||
# test_runner/regress/test_compute_metrics.py.
|
# test_runner/regress/test_compute_metrics.py.
|
||||||
ENV SQL_EXPORTER_VERSION=0.17.0
|
ENV SQL_EXPORTER_VERSION=0.13.1
|
||||||
RUN curl -fsSL \
|
RUN curl -fsSL \
|
||||||
"https://github.com/burningalchemist/sql_exporter/releases/download/${SQL_EXPORTER_VERSION}/sql_exporter-${SQL_EXPORTER_VERSION}.linux-$(case "$(uname -m)" in x86_64) echo amd64;; aarch64) echo arm64;; esac).tar.gz" \
|
"https://github.com/burningalchemist/sql_exporter/releases/download/${SQL_EXPORTER_VERSION}/sql_exporter-${SQL_EXPORTER_VERSION}.linux-$(case "$(uname -m)" in x86_64) echo amd64;; aarch64) echo arm64;; esac).tar.gz" \
|
||||||
--output sql_exporter.tar.gz \
|
--output sql_exporter.tar.gz \
|
||||||
@@ -196,20 +182,29 @@ RUN set -e \
|
|||||||
# It includes several bug fixes on top on v2.0 release (https://github.com/linux-test-project/lcov/compare/v2.0...master)
|
# It includes several bug fixes on top on v2.0 release (https://github.com/linux-test-project/lcov/compare/v2.0...master)
|
||||||
# And patches from us:
|
# And patches from us:
|
||||||
# - Generates json file with code coverage summary (https://github.com/neondatabase/lcov/commit/426e7e7a22f669da54278e9b55e6d8caabd00af0.tar.gz)
|
# - Generates json file with code coverage summary (https://github.com/neondatabase/lcov/commit/426e7e7a22f669da54278e9b55e6d8caabd00af0.tar.gz)
|
||||||
RUN set +o pipefail && \
|
RUN for package in Capture::Tiny DateTime Devel::Cover Digest::MD5 File::Spec JSON::XS Memory::Process Time::HiRes JSON; do yes | perl -MCPAN -e "CPAN::Shell->notest('install', '$package')"; done \
|
||||||
for package in Capture::Tiny DateTime Devel::Cover Digest::MD5 File::Spec JSON::XS Memory::Process Time::HiRes JSON; do \
|
&& wget https://github.com/neondatabase/lcov/archive/426e7e7a22f669da54278e9b55e6d8caabd00af0.tar.gz -O lcov.tar.gz \
|
||||||
yes | perl -MCPAN -e "CPAN::Shell->notest('install', '$package')";\
|
|
||||||
done && \
|
|
||||||
set -o pipefail
|
|
||||||
# Split into separate step to debug flaky failures here
|
|
||||||
RUN wget https://github.com/neondatabase/lcov/archive/426e7e7a22f669da54278e9b55e6d8caabd00af0.tar.gz -O lcov.tar.gz \
|
|
||||||
&& ls -laht lcov.tar.gz && sha256sum lcov.tar.gz \
|
|
||||||
&& echo "61a22a62e20908b8b9e27d890bd0ea31f567a7b9668065589266371dcbca0992 lcov.tar.gz" | sha256sum --check \
|
&& echo "61a22a62e20908b8b9e27d890bd0ea31f567a7b9668065589266371dcbca0992 lcov.tar.gz" | sha256sum --check \
|
||||||
&& mkdir -p lcov && tar -xzf lcov.tar.gz -C lcov --strip-components=1 \
|
&& mkdir -p lcov && tar -xzf lcov.tar.gz -C lcov --strip-components=1 \
|
||||||
&& cd lcov \
|
&& cd lcov \
|
||||||
&& make install \
|
&& make install \
|
||||||
&& rm -rf ../lcov.tar.gz
|
&& rm -rf ../lcov.tar.gz
|
||||||
|
|
||||||
|
# Compile and install the static OpenSSL library
|
||||||
|
ENV OPENSSL_VERSION=1.1.1w
|
||||||
|
ENV OPENSSL_PREFIX=/usr/local/openssl
|
||||||
|
RUN wget -O /tmp/openssl-${OPENSSL_VERSION}.tar.gz https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz && \
|
||||||
|
echo "cf3098950cb4d853ad95c0841f1f9c6d3dc102dccfcacd521d93925208b76ac8 /tmp/openssl-${OPENSSL_VERSION}.tar.gz" | sha256sum --check && \
|
||||||
|
cd /tmp && \
|
||||||
|
tar xzvf /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \
|
||||||
|
rm /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \
|
||||||
|
cd /tmp/openssl-${OPENSSL_VERSION} && \
|
||||||
|
./config --prefix=${OPENSSL_PREFIX} -static --static no-shared -fPIC && \
|
||||||
|
make -j "$(nproc)" && \
|
||||||
|
make install && \
|
||||||
|
cd /tmp && \
|
||||||
|
rm -rf /tmp/openssl-${OPENSSL_VERSION}
|
||||||
|
|
||||||
# Use the same version of libicu as the compute nodes so that
|
# Use the same version of libicu as the compute nodes so that
|
||||||
# clusters created using inidb on pageserver can be used by computes.
|
# clusters created using inidb on pageserver can be used by computes.
|
||||||
#
|
#
|
||||||
@@ -238,8 +233,6 @@ RUN wget -O /tmp/libicu-${ICU_VERSION}.tgz https://github.com/unicode-org/icu/re
|
|||||||
USER nonroot:nonroot
|
USER nonroot:nonroot
|
||||||
WORKDIR /home/nonroot
|
WORKDIR /home/nonroot
|
||||||
|
|
||||||
RUN echo -e "--retry-connrefused\n--connect-timeout 15\n--retry 5\n--max-time 300\n" > /home/nonroot/.curlrc
|
|
||||||
|
|
||||||
# Python
|
# Python
|
||||||
ENV PYTHON_VERSION=3.11.10 \
|
ENV PYTHON_VERSION=3.11.10 \
|
||||||
PYENV_ROOT=/home/nonroot/.pyenv \
|
PYENV_ROOT=/home/nonroot/.pyenv \
|
||||||
@@ -265,7 +258,7 @@ WORKDIR /home/nonroot
|
|||||||
|
|
||||||
# Rust
|
# Rust
|
||||||
# Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`)
|
# Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`)
|
||||||
ENV RUSTC_VERSION=1.84.1
|
ENV RUSTC_VERSION=1.83.0
|
||||||
ENV RUSTUP_HOME="/home/nonroot/.rustup"
|
ENV RUSTUP_HOME="/home/nonroot/.rustup"
|
||||||
ENV PATH="/home/nonroot/.cargo/bin:${PATH}"
|
ENV PATH="/home/nonroot/.cargo/bin:${PATH}"
|
||||||
ARG RUSTFILT_VERSION=0.2.1
|
ARG RUSTFILT_VERSION=0.2.1
|
||||||
@@ -273,7 +266,6 @@ ARG CARGO_HAKARI_VERSION=0.9.33
|
|||||||
ARG CARGO_DENY_VERSION=0.16.2
|
ARG CARGO_DENY_VERSION=0.16.2
|
||||||
ARG CARGO_HACK_VERSION=0.6.33
|
ARG CARGO_HACK_VERSION=0.6.33
|
||||||
ARG CARGO_NEXTEST_VERSION=0.9.85
|
ARG CARGO_NEXTEST_VERSION=0.9.85
|
||||||
ARG CARGO_DIESEL_CLI_VERSION=2.2.6
|
|
||||||
RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && whoami && \
|
RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && whoami && \
|
||||||
chmod +x rustup-init && \
|
chmod +x rustup-init && \
|
||||||
./rustup-init -y --default-toolchain ${RUSTC_VERSION} && \
|
./rustup-init -y --default-toolchain ${RUSTC_VERSION} && \
|
||||||
@@ -287,8 +279,6 @@ RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux
|
|||||||
cargo install cargo-deny --locked --version ${CARGO_DENY_VERSION} && \
|
cargo install cargo-deny --locked --version ${CARGO_DENY_VERSION} && \
|
||||||
cargo install cargo-hack --version ${CARGO_HACK_VERSION} && \
|
cargo install cargo-hack --version ${CARGO_HACK_VERSION} && \
|
||||||
cargo install cargo-nextest --version ${CARGO_NEXTEST_VERSION} && \
|
cargo install cargo-nextest --version ${CARGO_NEXTEST_VERSION} && \
|
||||||
cargo install diesel_cli --version ${CARGO_DIESEL_CLI_VERSION} \
|
|
||||||
--features postgres-bundled --no-default-features && \
|
|
||||||
rm -rf /home/nonroot/.cargo/registry && \
|
rm -rf /home/nonroot/.cargo/registry && \
|
||||||
rm -rf /home/nonroot/.cargo/git
|
rm -rf /home/nonroot/.cargo/git
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
@@ -3,7 +3,7 @@
   metrics: [
     import 'sql_exporter/checkpoints_req.libsonnet',
     import 'sql_exporter/checkpoints_timed.libsonnet',
-    import 'sql_exporter/compute_backpressure_throttling_seconds_total.libsonnet',
+    import 'sql_exporter/compute_backpressure_throttling_seconds.libsonnet',
     import 'sql_exporter/compute_current_lsn.libsonnet',
     import 'sql_exporter/compute_logical_snapshot_files.libsonnet',
     import 'sql_exporter/compute_logical_snapshots_bytes.libsonnet',

@@ -19,12 +19,3 @@ max_prepared_statements=0
 admin_users=postgres
 unix_socket_dir=/tmp/
 unix_socket_mode=0777
-; required for pgbouncer_exporter
-ignore_startup_parameters=extra_float_digits
-
-;; Disable connection logging. It produces a lot of logs that no one looks at,
-;; and we can get similar log entries from the proxy too. We had incidents in
-;; the past where the logging significantly stressed the log device or pgbouncer
-;; itself.
-log_connections=0
-log_disconnections=0

@@ -1,10 +1,10 @@
 {
-  metric_name: 'compute_backpressure_throttling_seconds_total',
+  metric_name: 'compute_backpressure_throttling_seconds',
-  type: 'counter',
+  type: 'gauge',
   help: 'Time compute has spent throttled',
   key_labels: null,
   values: [
     'throttled',
   ],
-  query: importstr 'sql_exporter/compute_backpressure_throttling_seconds_total.sql',
+  query: importstr 'sql_exporter/compute_backpressure_throttling_seconds.sql',
 }

@@ -981,7 +981,7 @@ index fc42d418bf..e38f517574 100644
|
|||||||
CREATE SCHEMA addr_nsp;
|
CREATE SCHEMA addr_nsp;
|
||||||
SET search_path TO 'addr_nsp';
|
SET search_path TO 'addr_nsp';
|
||||||
diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out
|
diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out
|
||||||
index 8475231735..0653946337 100644
|
index 8475231735..1afae5395f 100644
|
||||||
--- a/src/test/regress/expected/password.out
|
--- a/src/test/regress/expected/password.out
|
||||||
+++ b/src/test/regress/expected/password.out
|
+++ b/src/test/regress/expected/password.out
|
||||||
@@ -12,11 +12,11 @@ SET password_encryption = 'md5'; -- ok
|
@@ -12,11 +12,11 @@ SET password_encryption = 'md5'; -- ok
|
||||||
@@ -1006,63 +1006,65 @@ index 8475231735..0653946337 100644
|
|||||||
-----------------+---------------------------------------------------
|
-----------------+---------------------------------------------------
|
||||||
- regress_passwd1 | md5783277baca28003b33453252be4dbb34
|
- regress_passwd1 | md5783277baca28003b33453252be4dbb34
|
||||||
- regress_passwd2 | md54044304ba511dd062133eb5b4b84a2a3
|
- regress_passwd2 | md54044304ba511dd062133eb5b4b84a2a3
|
||||||
+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
|
+ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
|
||||||
+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
|
+ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
|
||||||
regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
||||||
- regress_passwd4 |
|
- regress_passwd4 |
|
||||||
+ regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
+ regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
||||||
(4 rows)
|
(4 rows)
|
||||||
|
|
||||||
-- Rename a role
|
-- Rename a role
|
||||||
@@ -54,24 +54,16 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
|
@@ -54,24 +54,30 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
|
||||||
-- passwords.
|
-- passwords.
|
||||||
SET password_encryption = 'md5';
|
SET password_encryption = 'md5';
|
||||||
-- encrypt with MD5
|
-- encrypt with MD5
|
||||||
-ALTER ROLE regress_passwd2 PASSWORD 'foo';
|
-ALTER ROLE regress_passwd2 PASSWORD 'foo';
|
||||||
--- already encrypted, use as they are
|
|
||||||
-ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
|
|
||||||
-ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
|
|
||||||
+ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
+ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
||||||
|
-- already encrypted, use as they are
|
||||||
|
ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
|
||||||
|
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
||||||
|
ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
|
||||||
|
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
||||||
SET password_encryption = 'scram-sha-256';
|
SET password_encryption = 'scram-sha-256';
|
||||||
-- create SCRAM secret
|
-- create SCRAM secret
|
||||||
-ALTER ROLE regress_passwd4 PASSWORD 'foo';
|
-ALTER ROLE regress_passwd4 PASSWORD 'foo';
|
||||||
--- already encrypted with MD5, use as it is
|
|
||||||
-CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
|
|
||||||
--- This looks like a valid SCRAM-SHA-256 secret, but it is not
|
|
||||||
--- so it should be hashed with SCRAM-SHA-256.
|
|
||||||
-CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
|
|
||||||
--- These may look like valid MD5 secrets, but they are not, so they
|
|
||||||
--- should be hashed with SCRAM-SHA-256.
|
|
||||||
--- trailing garbage at the end
|
|
||||||
-CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
|
|
||||||
--- invalid length
|
|
||||||
-CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
|
|
||||||
+ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
+ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
||||||
+-- Neon does not support encrypted passwords, use unencrypted instead
|
-- already encrypted with MD5, use as it is
|
||||||
+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
|
||||||
+-- Neon does not support encrypted passwords, use unencrypted instead
|
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
||||||
+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
-- This looks like a valid SCRAM-SHA-256 secret, but it is not
|
||||||
+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
-- so it should be hashed with SCRAM-SHA-256.
|
||||||
+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
|
||||||
|
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
||||||
|
-- These may look like valid MD5 secrets, but they are not, so they
|
||||||
|
-- should be hashed with SCRAM-SHA-256.
|
||||||
|
-- trailing garbage at the end
|
||||||
|
CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
|
||||||
|
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
||||||
|
-- invalid length
|
||||||
|
CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
|
||||||
|
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
||||||
-- Changing the SCRAM iteration count
|
-- Changing the SCRAM iteration count
|
||||||
SET scram_iterations = 1024;
|
SET scram_iterations = 1024;
|
||||||
CREATE ROLE regress_passwd9 PASSWORD 'alterediterationcount';
|
CREATE ROLE regress_passwd9 PASSWORD 'alterediterationcount';
|
||||||
@@ -81,11 +73,11 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
|
@@ -81,63 +87,67 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
|
||||||
ORDER BY rolname, rolpassword;
|
ORDER BY rolname, rolpassword;
|
||||||
rolname | rolpassword_masked
|
rolname | rolpassword_masked
|
||||||
-----------------+---------------------------------------------------
|
-----------------+---------------------------------------------------
|
||||||
- regress_passwd1 | md5cd3578025fe2c3d7ed1b9a9b26238b70
|
- regress_passwd1 | md5cd3578025fe2c3d7ed1b9a9b26238b70
|
||||||
- regress_passwd2 | md5dfa155cadd5f4ad57860162f3fab9cdb
|
- regress_passwd2 | md5dfa155cadd5f4ad57860162f3fab9cdb
|
||||||
+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
|
+ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
|
||||||
+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
|
+ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
|
||||||
regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
||||||
regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
||||||
- regress_passwd5 | md5e73a4b11df52a6068f8b39f90be36023
|
- regress_passwd5 | md5e73a4b11df52a6068f8b39f90be36023
|
||||||
+ regress_passwd5 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
- regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
||||||
regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
- regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
||||||
regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
- regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
||||||
regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
|
regress_passwd9 | SCRAM-SHA-256$1024:<salt>$<storedkey>:<serverkey>
|
||||||
@@ -95,23 +87,20 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
|
-(9 rows)
|
||||||
|
+(5 rows)
|
||||||
|
|
||||||
-- An empty password is not allowed, in any form
|
-- An empty password is not allowed, in any form
|
||||||
CREATE ROLE regress_passwd_empty PASSWORD '';
|
CREATE ROLE regress_passwd_empty PASSWORD '';
|
||||||
NOTICE: empty string is not a valid password, clearing password
|
NOTICE: empty string is not a valid password, clearing password
|
||||||
@@ -1080,37 +1082,56 @@ index 8475231735..0653946337 100644
|
|||||||
-(1 row)
|
-(1 row)
|
||||||
+(0 rows)
|
+(0 rows)
|
||||||
|
|
||||||
--- Test with invalid stored and server keys.
|
-- Test with invalid stored and server keys.
|
||||||
---
|
--
|
||||||
--- The first is valid, to act as a control. The others have too long
|
-- The first is valid, to act as a control. The others have too long
|
||||||
--- stored/server keys. They will be re-hashed.
|
-- stored/server keys. They will be re-hashed.
|
||||||
-CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
||||||
-CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
||||||
-CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
|
CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
||||||
+-- Neon does not support encrypted passwords, use unencrypted instead
|
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
||||||
+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
|
||||||
+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
|
||||||
+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
|
||||||
-- Check that the invalid secrets were re-hashed. A re-hashed secret
|
-- Check that the invalid secrets were re-hashed. A re-hashed secret
|
||||||
-- should not contain the original salt.
|
-- should not contain the original salt.
|
||||||
SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed
|
SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed
|
||||||
@@ -120,7 +109,7 @@ SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassw
|
FROM pg_authid
|
||||||
|
WHERE rolname LIKE 'regress_passwd_sha_len%'
|
||||||
ORDER BY rolname;
|
ORDER BY rolname;
|
||||||
rolname | is_rolpassword_rehashed
|
- rolname | is_rolpassword_rehashed
|
||||||
-------------------------+-------------------------
|
--------------------------+-------------------------
|
||||||
- regress_passwd_sha_len0 | f
|
- regress_passwd_sha_len0 | f
|
||||||
+ regress_passwd_sha_len0 | t
|
- regress_passwd_sha_len1 | t
|
||||||
regress_passwd_sha_len1 | t
|
- regress_passwd_sha_len2 | t
|
||||||
regress_passwd_sha_len2 | t
|
-(3 rows)
|
||||||
(3 rows)
|
+ rolname | is_rolpassword_rehashed
|
||||||
@@ -135,6 +124,7 @@ DROP ROLE regress_passwd7;
|
+---------+-------------------------
|
||||||
|
+(0 rows)
|
||||||
|
|
||||||
|
DROP ROLE regress_passwd1;
|
||||||
|
DROP ROLE regress_passwd2;
|
||||||
|
DROP ROLE regress_passwd3;
|
||||||
|
DROP ROLE regress_passwd4;
|
||||||
|
DROP ROLE regress_passwd5;
|
||||||
|
+ERROR: role "regress_passwd5" does not exist
|
||||||
|
DROP ROLE regress_passwd6;
|
||||||
|
+ERROR: role "regress_passwd6" does not exist
|
||||||
|
DROP ROLE regress_passwd7;
|
||||||
|
+ERROR: role "regress_passwd7" does not exist
|
||||||
DROP ROLE regress_passwd8;
|
DROP ROLE regress_passwd8;
|
||||||
|
+ERROR: role "regress_passwd8" does not exist
|
||||||
DROP ROLE regress_passwd9;
|
DROP ROLE regress_passwd9;
|
||||||
DROP ROLE regress_passwd_empty;
|
DROP ROLE regress_passwd_empty;
|
||||||
+ERROR: role "regress_passwd_empty" does not exist
|
+ERROR: role "regress_passwd_empty" does not exist
|
||||||
DROP ROLE regress_passwd_sha_len0;
|
DROP ROLE regress_passwd_sha_len0;
|
||||||
|
+ERROR: role "regress_passwd_sha_len0" does not exist
|
||||||
DROP ROLE regress_passwd_sha_len1;
|
DROP ROLE regress_passwd_sha_len1;
|
||||||
|
+ERROR: role "regress_passwd_sha_len1" does not exist
|
||||||
DROP ROLE regress_passwd_sha_len2;
|
DROP ROLE regress_passwd_sha_len2;
|
||||||
|
+ERROR: role "regress_passwd_sha_len2" does not exist
|
||||||
|
-- all entries should have been removed
|
||||||
|
SELECT rolname, rolpassword
|
||||||
|
FROM pg_authid
|
||||||
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
|
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
|
||||||
index 5b9dba7b32..cc408dad42 100644
|
index 5b9dba7b32..cc408dad42 100644
|
||||||
--- a/src/test/regress/expected/privileges.out
|
--- a/src/test/regress/expected/privileges.out
|
||||||
@@ -3173,7 +3194,7 @@ index 1a6c61f49d..1c31ac6a53 100644
|
|||||||
-- Test generic object addressing/identification functions
|
-- Test generic object addressing/identification functions
|
||||||
CREATE SCHEMA addr_nsp;
|
CREATE SCHEMA addr_nsp;
|
||||||
diff --git a/src/test/regress/sql/password.sql b/src/test/regress/sql/password.sql
|
diff --git a/src/test/regress/sql/password.sql b/src/test/regress/sql/password.sql
|
||||||
index 53e86b0b6c..0303fdfe96 100644
|
index 53e86b0b6c..f07cf1ec54 100644
|
||||||
--- a/src/test/regress/sql/password.sql
|
--- a/src/test/regress/sql/password.sql
|
||||||
+++ b/src/test/regress/sql/password.sql
|
+++ b/src/test/regress/sql/password.sql
|
||||||
@@ -10,11 +10,11 @@ SET password_encryption = 'scram-sha-256'; -- ok
|
@@ -10,11 +10,11 @@ SET password_encryption = 'scram-sha-256'; -- ok
|
||||||
@@ -3192,59 +3213,23 @@ index 53e86b0b6c..0303fdfe96 100644
|
|||||||
|
|
||||||
-- check list of created entries
|
-- check list of created entries
|
||||||
--
|
--
|
||||||
@@ -42,26 +42,18 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
|
@@ -42,14 +42,14 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
|
||||||
SET password_encryption = 'md5';
|
SET password_encryption = 'md5';
|
||||||
|
|
||||||
-- encrypt with MD5
|
-- encrypt with MD5
|
||||||
-ALTER ROLE regress_passwd2 PASSWORD 'foo';
|
-ALTER ROLE regress_passwd2 PASSWORD 'foo';
|
||||||
--- already encrypted, use as they are
|
|
||||||
-ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
|
|
||||||
-ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
|
|
||||||
+ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
+ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
||||||
|
-- already encrypted, use as they are
|
||||||
|
ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
|
||||||
|
ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
|
||||||
|
|
||||||
SET password_encryption = 'scram-sha-256';
|
SET password_encryption = 'scram-sha-256';
|
||||||
-- create SCRAM secret
|
-- create SCRAM secret
|
||||||
-ALTER ROLE regress_passwd4 PASSWORD 'foo';
|
-ALTER ROLE regress_passwd4 PASSWORD 'foo';
|
||||||
--- already encrypted with MD5, use as it is
|
|
||||||
-CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
|
|
||||||
+ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
+ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
||||||
+-- Neon does not support encrypted passwords, use unencrypted instead
|
-- already encrypted with MD5, use as it is
|
||||||
+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
|
||||||
|
|
||||||
--- This looks like a valid SCRAM-SHA-256 secret, but it is not
|
|
||||||
--- so it should be hashed with SCRAM-SHA-256.
|
|
||||||
-CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
|
|
||||||
--- These may look like valid MD5 secrets, but they are not, so they
|
|
||||||
--- should be hashed with SCRAM-SHA-256.
|
|
||||||
--- trailing garbage at the end
|
|
||||||
-CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
|
|
||||||
--- invalid length
|
|
||||||
-CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
|
|
||||||
+-- Neon does not support encrypted passwords, use unencrypted instead
|
|
||||||
+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
|
||||||
+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
|
||||||
+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
|
||||||
|
|
||||||
-- Changing the SCRAM iteration count
|
|
||||||
SET scram_iterations = 1024;
|
|
||||||
@@ -78,13 +70,10 @@ ALTER ROLE regress_passwd_empty PASSWORD 'md585939a5ce845f1a1b620742e3c659e0a';
|
|
||||||
ALTER ROLE regress_passwd_empty PASSWORD 'SCRAM-SHA-256$4096:hpFyHTUsSWcR7O9P$LgZFIt6Oqdo27ZFKbZ2nV+vtnYM995pDh9ca6WSi120=:qVV5NeluNfUPkwm7Vqat25RjSPLkGeoZBQs6wVv+um4=';
|
|
||||||
SELECT rolpassword FROM pg_authid WHERE rolname='regress_passwd_empty';
|
|
||||||
|
|
||||||
--- Test with invalid stored and server keys.
|
|
||||||
---
|
|
||||||
--- The first is valid, to act as a control. The others have too long
|
|
||||||
--- stored/server keys. They will be re-hashed.
|
|
||||||
-CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
|
||||||
-CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
|
|
||||||
-CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
|
|
||||||
+-- Neon does not support encrypted passwords, use unencrypted instead
|
|
||||||
+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
|
||||||
+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
|
||||||
+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
|
|
||||||
|
|
||||||
-- Check that the invalid secrets were re-hashed. A re-hashed secret
|
|
||||||
-- should not contain the original salt.
|
|
||||||
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
|
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
|
||||||
index 249df17a58..b258e7f26a 100644
|
index 249df17a58..b258e7f26a 100644
|
||||||
--- a/src/test/regress/sql/privileges.sql
|
--- a/src/test/regress/sql/privileges.sql
|
||||||
|
|||||||
File diff suppressed because it is too large
@@ -1,242 +0,0 @@
|
|||||||
diff --git a/contrib/amcheck/expected/check_heap.out b/contrib/amcheck/expected/check_heap.out
|
|
||||||
index 979e5e8..2375b45 100644
|
|
||||||
--- a/contrib/amcheck/expected/check_heap.out
|
|
||||||
+++ b/contrib/amcheck/expected/check_heap.out
|
|
||||||
@@ -80,12 +80,9 @@ INSERT INTO heaptest (a, b)
|
|
||||||
-- same transaction. The heaptest table is smaller than the default
|
|
||||||
-- wal_skip_threshold, so a wal_level=minimal commit reads the table into
|
|
||||||
-- shared_buffers. A transaction delays that and excludes any autovacuum.
|
|
||||||
-SET allow_in_place_tablespaces = true;
|
|
||||||
-CREATE TABLESPACE regress_test_stats_tblspc LOCATION '';
|
|
||||||
SELECT sum(reads) AS stats_bulkreads_before
|
|
||||||
FROM pg_stat_io WHERE context = 'bulkread' \gset
|
|
||||||
BEGIN;
|
|
||||||
-ALTER TABLE heaptest SET TABLESPACE regress_test_stats_tblspc;
|
|
||||||
-- Check that valid options are not rejected nor corruption reported
|
|
||||||
-- for a non-empty table
|
|
||||||
SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none');
|
|
||||||
@@ -118,14 +115,6 @@ SELECT pg_stat_force_next_flush();
|
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
-SELECT sum(reads) AS stats_bulkreads_after
|
|
||||||
- FROM pg_stat_io WHERE context = 'bulkread' \gset
|
|
||||||
-SELECT :stats_bulkreads_after > :stats_bulkreads_before;
|
|
||||||
- ?column?
|
|
||||||
-----------
|
|
||||||
- t
|
|
||||||
-(1 row)
|
|
||||||
-
|
|
||||||
CREATE ROLE regress_heaptest_role;
|
|
||||||
-- verify permissions are checked (error due to function not callable)
|
|
||||||
SET ROLE regress_heaptest_role;
|
|
||||||
@@ -233,7 +222,6 @@ ERROR: cannot check relation "test_foreign_table"
|
|
||||||
DETAIL: This operation is not supported for foreign tables.
|
|
||||||
-- cleanup
|
|
||||||
DROP TABLE heaptest;
|
|
||||||
-DROP TABLESPACE regress_test_stats_tblspc;
|
|
||||||
DROP TABLE test_partition;
|
|
||||||
DROP TABLE test_partitioned;
|
|
||||||
DROP OWNED BY regress_heaptest_role; -- permissions
|
|
||||||
diff --git a/contrib/amcheck/sql/check_heap.sql b/contrib/amcheck/sql/check_heap.sql
|
|
||||||
index 1745bae..3b429c3 100644
|
|
||||||
--- a/contrib/amcheck/sql/check_heap.sql
|
|
||||||
+++ b/contrib/amcheck/sql/check_heap.sql
|
|
||||||
@@ -40,12 +40,9 @@ INSERT INTO heaptest (a, b)
|
|
||||||
-- same transaction. The heaptest table is smaller than the default
|
|
||||||
-- wal_skip_threshold, so a wal_level=minimal commit reads the table into
|
|
||||||
-- shared_buffers. A transaction delays that and excludes any autovacuum.
|
|
||||||
-SET allow_in_place_tablespaces = true;
|
|
||||||
-CREATE TABLESPACE regress_test_stats_tblspc LOCATION '';
|
|
||||||
SELECT sum(reads) AS stats_bulkreads_before
|
|
||||||
FROM pg_stat_io WHERE context = 'bulkread' \gset
|
|
||||||
BEGIN;
|
|
||||||
-ALTER TABLE heaptest SET TABLESPACE regress_test_stats_tblspc;
|
|
||||||
-- Check that valid options are not rejected nor corruption reported
|
|
||||||
-- for a non-empty table
|
|
||||||
SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none');
|
|
||||||
@@ -58,9 +55,6 @@ COMMIT;
|
|
||||||
-- ALTER TABLE ... SET TABLESPACE ...
|
|
||||||
-- causing an additional bulkread, which should be reflected in pg_stat_io.
|
|
||||||
SELECT pg_stat_force_next_flush();
|
|
||||||
-SELECT sum(reads) AS stats_bulkreads_after
|
|
||||||
- FROM pg_stat_io WHERE context = 'bulkread' \gset
|
|
||||||
-SELECT :stats_bulkreads_after > :stats_bulkreads_before;
|
|
||||||
|
|
||||||
CREATE ROLE regress_heaptest_role;
|
|
||||||
|
|
||||||
@@ -140,7 +134,6 @@ SELECT * FROM verify_heapam('test_foreign_table',
|
|
||||||
|
|
||||||
-- cleanup
|
|
||||||
DROP TABLE heaptest;
|
|
||||||
-DROP TABLESPACE regress_test_stats_tblspc;
|
|
||||||
DROP TABLE test_partition;
|
|
||||||
DROP TABLE test_partitioned;
|
|
||||||
DROP OWNED BY regress_heaptest_role; -- permissions
|
|
||||||
diff --git a/contrib/citext/expected/create_index_acl.out b/contrib/citext/expected/create_index_acl.out
|
|
||||||
index 33be13a..70a406c 100644
|
|
||||||
--- a/contrib/citext/expected/create_index_acl.out
|
|
||||||
+++ b/contrib/citext/expected/create_index_acl.out
|
|
||||||
@@ -5,9 +5,6 @@
|
|
||||||
-- owner having as few applicable privileges as possible. (The privileges.sql
|
|
||||||
-- regress_sro_user tests look for the opposite defect; they confirm that
|
|
||||||
-- DefineIndex() uses the table owner userid where necessary.)
|
|
||||||
-SET allow_in_place_tablespaces = true;
|
|
||||||
-CREATE TABLESPACE regress_create_idx_tblspace LOCATION '';
|
|
||||||
-RESET allow_in_place_tablespaces;
|
|
||||||
BEGIN;
|
|
||||||
CREATE ROLE regress_minimal;
|
|
||||||
CREATE SCHEMA s;
|
|
||||||
@@ -49,11 +46,9 @@ ALTER TABLE s.x OWNER TO regress_minimal;
|
|
||||||
-- Empty-table DefineIndex()
|
|
||||||
CREATE UNIQUE INDEX u0rows ON s.x USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops)
|
|
||||||
- TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE s.index_row_if(y);
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT e0rows EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
-- Make the table nonempty.
|
|
||||||
INSERT INTO s.x VALUES ('foo'), ('bar');
|
|
||||||
@@ -66,11 +61,9 @@ RESET search_path;
|
|
||||||
GRANT EXECUTE ON FUNCTION s.index_this_expr TO regress_minimal;
|
|
||||||
CREATE UNIQUE INDEX u2rows ON s.x USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops)
|
|
||||||
- TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE s.index_row_if(y);
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
-- Shall not find s.coll via search_path, despite the s.const->public.setter
|
|
||||||
-- call having set search_path=s during expression planning. Suppress the
|
|
||||||
@@ -78,9 +71,7 @@ ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree
|
|
||||||
\set VERBOSITY sqlstate
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT underqualified EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
ERROR: 42704
|
|
||||||
\set VERBOSITY default
|
|
||||||
ROLLBACK;
|
|
||||||
-DROP TABLESPACE regress_create_idx_tblspace;
|
|
||||||
diff --git a/contrib/citext/sql/create_index_acl.sql b/contrib/citext/sql/create_index_acl.sql
|
|
||||||
index 10b5225..ae442e1 100644
|
|
||||||
--- a/contrib/citext/sql/create_index_acl.sql
|
|
||||||
+++ b/contrib/citext/sql/create_index_acl.sql
|
|
||||||
@@ -6,10 +6,6 @@
|
|
||||||
-- regress_sro_user tests look for the opposite defect; they confirm that
|
|
||||||
-- DefineIndex() uses the table owner userid where necessary.)
|
|
||||||
|
|
||||||
-SET allow_in_place_tablespaces = true;
|
|
||||||
-CREATE TABLESPACE regress_create_idx_tblspace LOCATION '';
|
|
||||||
-RESET allow_in_place_tablespaces;
|
|
||||||
-
|
|
||||||
BEGIN;
|
|
||||||
CREATE ROLE regress_minimal;
|
|
||||||
CREATE SCHEMA s;
|
|
||||||
@@ -51,11 +47,9 @@ ALTER TABLE s.x OWNER TO regress_minimal;
|
|
||||||
-- Empty-table DefineIndex()
|
|
||||||
CREATE UNIQUE INDEX u0rows ON s.x USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops)
|
|
||||||
- TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE s.index_row_if(y);
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT e0rows EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
-- Make the table nonempty.
|
|
||||||
INSERT INTO s.x VALUES ('foo'), ('bar');
|
|
||||||
@@ -68,11 +62,9 @@ RESET search_path;
|
|
||||||
GRANT EXECUTE ON FUNCTION s.index_this_expr TO regress_minimal;
|
|
||||||
CREATE UNIQUE INDEX u2rows ON s.x USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops)
|
|
||||||
- TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE s.index_row_if(y);
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
-- Shall not find s.coll via search_path, despite the s.const->public.setter
|
|
||||||
-- call having set search_path=s during expression planning. Suppress the
|
|
||||||
@@ -80,9 +72,7 @@ ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree
|
|
||||||
\set VERBOSITY sqlstate
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT underqualified EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
\set VERBOSITY default
|
|
||||||
ROLLBACK;
|
|
||||||
|
|
||||||
-DROP TABLESPACE regress_create_idx_tblspace;
|
|
||||||
diff --git a/contrib/file_fdw/expected/file_fdw.out b/contrib/file_fdw/expected/file_fdw.out
|
|
||||||
index 72304e0..ebe131b 100644
|
|
||||||
--- a/contrib/file_fdw/expected/file_fdw.out
|
|
||||||
+++ b/contrib/file_fdw/expected/file_fdw.out
|
|
||||||
@@ -4,6 +4,7 @@
|
|
||||||
-- directory paths are passed to us in environment variables
|
|
||||||
\getenv abs_srcdir PG_ABS_SRCDIR
|
|
||||||
-- Clean up in case a prior regression run failed
|
|
||||||
+SET compute_query_id TO 'off';
|
|
||||||
SET client_min_messages TO 'warning';
|
|
||||||
DROP ROLE IF EXISTS regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
|
|
||||||
RESET client_min_messages;
|
|
||||||
diff --git a/contrib/file_fdw/sql/file_fdw.sql b/contrib/file_fdw/sql/file_fdw.sql
|
|
||||||
index f0548e1..848a08c 100644
|
|
||||||
--- a/contrib/file_fdw/sql/file_fdw.sql
|
|
||||||
+++ b/contrib/file_fdw/sql/file_fdw.sql
|
|
||||||
@@ -6,6 +6,7 @@
|
|
||||||
\getenv abs_srcdir PG_ABS_SRCDIR
|
|
||||||
|
|
||||||
-- Clean up in case a prior regression run failed
|
|
||||||
+SET compute_query_id TO 'off';
|
|
||||||
SET client_min_messages TO 'warning';
|
|
||||||
DROP ROLE IF EXISTS regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
|
|
||||||
RESET client_min_messages;
|
|
||||||
diff --git a/contrib/pageinspect/expected/gist.out b/contrib/pageinspect/expected/gist.out
|
|
||||||
index d1adbab..38b52ac 100644
|
|
||||||
--- a/contrib/pageinspect/expected/gist.out
|
|
||||||
+++ b/contrib/pageinspect/expected/gist.out
|
|
||||||
@@ -10,25 +10,6 @@ BEGIN;
|
|
||||||
CREATE TABLE test_gist AS SELECT point(i,i) p, i::text t FROM
|
|
||||||
generate_series(1,1000) i;
|
|
||||||
CREATE INDEX test_gist_idx ON test_gist USING gist (p);
|
|
||||||
--- Page 0 is the root, the rest are leaf pages
|
|
||||||
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 0));
|
|
||||||
- lsn | nsn | rightlink | flags
|
|
||||||
------+-----+------------+-------
|
|
||||||
- 0/1 | 0/0 | 4294967295 | {}
|
|
||||||
-(1 row)
|
|
||||||
-
|
|
||||||
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 1));
|
|
||||||
- lsn | nsn | rightlink | flags
|
|
||||||
------+-----+------------+--------
|
|
||||||
- 0/1 | 0/0 | 4294967295 | {leaf}
|
|
||||||
-(1 row)
|
|
||||||
-
|
|
||||||
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 2));
|
|
||||||
- lsn | nsn | rightlink | flags
|
|
||||||
------+-----+-----------+--------
|
|
||||||
- 0/1 | 0/0 | 1 | {leaf}
|
|
||||||
-(1 row)
|
|
||||||
-
|
|
||||||
COMMIT;
|
|
||||||
SELECT * FROM gist_page_items(get_raw_page('test_gist_idx', 0), 'test_gist_idx');
|
|
||||||
itemoffset | ctid | itemlen | dead | keys
|
|
||||||
diff --git a/contrib/pageinspect/sql/gist.sql b/contrib/pageinspect/sql/gist.sql
|
|
||||||
index d263542..607992f 100644
|
|
||||||
--- a/contrib/pageinspect/sql/gist.sql
|
|
||||||
+++ b/contrib/pageinspect/sql/gist.sql
|
|
||||||
@@ -12,11 +12,6 @@ CREATE TABLE test_gist AS SELECT point(i,i) p, i::text t FROM
|
|
||||||
generate_series(1,1000) i;
|
|
||||||
CREATE INDEX test_gist_idx ON test_gist USING gist (p);
|
|
||||||
|
|
||||||
--- Page 0 is the root, the rest are leaf pages
|
|
||||||
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 0));
|
|
||||||
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 1));
|
|
||||||
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 2));
|
|
||||||
-
|
|
||||||
COMMIT;
|
|
||||||
|
|
||||||
SELECT * FROM gist_page_items(get_raw_page('test_gist_idx', 0), 'test_gist_idx');
|
|
||||||
@@ -1,196 +0,0 @@
|
|||||||
diff --git a/contrib/amcheck/expected/check_heap.out b/contrib/amcheck/expected/check_heap.out
|
|
||||||
index 979e5e8..2375b45 100644
|
|
||||||
--- a/contrib/amcheck/expected/check_heap.out
|
|
||||||
+++ b/contrib/amcheck/expected/check_heap.out
|
|
||||||
@@ -80,12 +80,9 @@ INSERT INTO heaptest (a, b)
|
|
||||||
-- same transaction. The heaptest table is smaller than the default
|
|
||||||
-- wal_skip_threshold, so a wal_level=minimal commit reads the table into
|
|
||||||
-- shared_buffers. A transaction delays that and excludes any autovacuum.
|
|
||||||
-SET allow_in_place_tablespaces = true;
|
|
||||||
-CREATE TABLESPACE regress_test_stats_tblspc LOCATION '';
|
|
||||||
SELECT sum(reads) AS stats_bulkreads_before
|
|
||||||
FROM pg_stat_io WHERE context = 'bulkread' \gset
|
|
||||||
BEGIN;
|
|
||||||
-ALTER TABLE heaptest SET TABLESPACE regress_test_stats_tblspc;
|
|
||||||
-- Check that valid options are not rejected nor corruption reported
|
|
||||||
-- for a non-empty table
|
|
||||||
SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none');
|
|
||||||
@@ -118,14 +115,6 @@ SELECT pg_stat_force_next_flush();
|
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
-SELECT sum(reads) AS stats_bulkreads_after
|
|
||||||
- FROM pg_stat_io WHERE context = 'bulkread' \gset
|
|
||||||
-SELECT :stats_bulkreads_after > :stats_bulkreads_before;
|
|
||||||
- ?column?
|
|
||||||
-----------
|
|
||||||
- t
|
|
||||||
-(1 row)
|
|
||||||
-
|
|
||||||
CREATE ROLE regress_heaptest_role;
|
|
||||||
-- verify permissions are checked (error due to function not callable)
|
|
||||||
SET ROLE regress_heaptest_role;
|
|
||||||
@@ -233,7 +222,6 @@ ERROR: cannot check relation "test_foreign_table"
|
|
||||||
DETAIL: This operation is not supported for foreign tables.
|
|
||||||
-- cleanup
|
|
||||||
DROP TABLE heaptest;
|
|
||||||
-DROP TABLESPACE regress_test_stats_tblspc;
|
|
||||||
DROP TABLE test_partition;
|
|
||||||
DROP TABLE test_partitioned;
|
|
||||||
DROP OWNED BY regress_heaptest_role; -- permissions
|
|
||||||
diff --git a/contrib/amcheck/sql/check_heap.sql b/contrib/amcheck/sql/check_heap.sql
|
|
||||||
index 1745bae..3b429c3 100644
|
|
||||||
--- a/contrib/amcheck/sql/check_heap.sql
|
|
||||||
+++ b/contrib/amcheck/sql/check_heap.sql
|
|
||||||
@@ -40,12 +40,9 @@ INSERT INTO heaptest (a, b)
|
|
||||||
-- same transaction. The heaptest table is smaller than the default
|
|
||||||
-- wal_skip_threshold, so a wal_level=minimal commit reads the table into
|
|
||||||
-- shared_buffers. A transaction delays that and excludes any autovacuum.
|
|
||||||
-SET allow_in_place_tablespaces = true;
|
|
||||||
-CREATE TABLESPACE regress_test_stats_tblspc LOCATION '';
|
|
||||||
SELECT sum(reads) AS stats_bulkreads_before
|
|
||||||
FROM pg_stat_io WHERE context = 'bulkread' \gset
|
|
||||||
BEGIN;
|
|
||||||
-ALTER TABLE heaptest SET TABLESPACE regress_test_stats_tblspc;
|
|
||||||
-- Check that valid options are not rejected nor corruption reported
|
|
||||||
-- for a non-empty table
|
|
||||||
SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none');
|
|
||||||
@@ -58,9 +55,6 @@ COMMIT;
|
|
||||||
-- ALTER TABLE ... SET TABLESPACE ...
|
|
||||||
-- causing an additional bulkread, which should be reflected in pg_stat_io.
|
|
||||||
SELECT pg_stat_force_next_flush();
|
|
||||||
-SELECT sum(reads) AS stats_bulkreads_after
|
|
||||||
- FROM pg_stat_io WHERE context = 'bulkread' \gset
|
|
||||||
-SELECT :stats_bulkreads_after > :stats_bulkreads_before;
|
|
||||||
|
|
||||||
CREATE ROLE regress_heaptest_role;
|
|
||||||
|
|
||||||
@@ -140,7 +134,6 @@ SELECT * FROM verify_heapam('test_foreign_table',
|
|
||||||
|
|
||||||
-- cleanup
|
|
||||||
DROP TABLE heaptest;
|
|
||||||
-DROP TABLESPACE regress_test_stats_tblspc;
|
|
||||||
DROP TABLE test_partition;
|
|
||||||
DROP TABLE test_partitioned;
|
|
||||||
DROP OWNED BY regress_heaptest_role; -- permissions
|
|
||||||
diff --git a/contrib/citext/expected/create_index_acl.out b/contrib/citext/expected/create_index_acl.out
|
|
||||||
index 33be13a..70a406c 100644
|
|
||||||
--- a/contrib/citext/expected/create_index_acl.out
|
|
||||||
+++ b/contrib/citext/expected/create_index_acl.out
|
|
||||||
@@ -5,9 +5,6 @@
|
|
||||||
-- owner having as few applicable privileges as possible. (The privileges.sql
|
|
||||||
-- regress_sro_user tests look for the opposite defect; they confirm that
|
|
||||||
-- DefineIndex() uses the table owner userid where necessary.)
|
|
||||||
-SET allow_in_place_tablespaces = true;
|
|
||||||
-CREATE TABLESPACE regress_create_idx_tblspace LOCATION '';
|
|
||||||
-RESET allow_in_place_tablespaces;
|
|
||||||
BEGIN;
|
|
||||||
CREATE ROLE regress_minimal;
|
|
||||||
CREATE SCHEMA s;
|
|
||||||
@@ -49,11 +46,9 @@ ALTER TABLE s.x OWNER TO regress_minimal;
|
|
||||||
-- Empty-table DefineIndex()
|
|
||||||
CREATE UNIQUE INDEX u0rows ON s.x USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops)
|
|
||||||
- TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE s.index_row_if(y);
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT e0rows EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
-- Make the table nonempty.
|
|
||||||
INSERT INTO s.x VALUES ('foo'), ('bar');
|
|
||||||
@@ -66,11 +61,9 @@ RESET search_path;
|
|
||||||
GRANT EXECUTE ON FUNCTION s.index_this_expr TO regress_minimal;
|
|
||||||
CREATE UNIQUE INDEX u2rows ON s.x USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops)
|
|
||||||
- TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE s.index_row_if(y);
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
-- Shall not find s.coll via search_path, despite the s.const->public.setter
|
|
||||||
-- call having set search_path=s during expression planning. Suppress the
|
|
||||||
@@ -78,9 +71,7 @@ ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree
|
|
||||||
\set VERBOSITY sqlstate
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT underqualified EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
ERROR: 42704
|
|
||||||
\set VERBOSITY default
|
|
||||||
ROLLBACK;
|
|
||||||
-DROP TABLESPACE regress_create_idx_tblspace;
|
|
||||||
diff --git a/contrib/citext/sql/create_index_acl.sql b/contrib/citext/sql/create_index_acl.sql
|
|
||||||
index 10b5225..ae442e1 100644
|
|
||||||
--- a/contrib/citext/sql/create_index_acl.sql
|
|
||||||
+++ b/contrib/citext/sql/create_index_acl.sql
|
|
||||||
@@ -6,10 +6,6 @@
|
|
||||||
-- regress_sro_user tests look for the opposite defect; they confirm that
|
|
||||||
-- DefineIndex() uses the table owner userid where necessary.)
|
|
||||||
|
|
||||||
-SET allow_in_place_tablespaces = true;
|
|
||||||
-CREATE TABLESPACE regress_create_idx_tblspace LOCATION '';
|
|
||||||
-RESET allow_in_place_tablespaces;
|
|
||||||
-
|
|
||||||
BEGIN;
|
|
||||||
CREATE ROLE regress_minimal;
|
|
||||||
CREATE SCHEMA s;
|
|
||||||
@@ -51,11 +47,9 @@ ALTER TABLE s.x OWNER TO regress_minimal;
|
|
||||||
-- Empty-table DefineIndex()
|
|
||||||
CREATE UNIQUE INDEX u0rows ON s.x USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops)
|
|
||||||
- TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE s.index_row_if(y);
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT e0rows EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
-- Make the table nonempty.
|
|
||||||
INSERT INTO s.x VALUES ('foo'), ('bar');
|
|
||||||
@@ -68,11 +62,9 @@ RESET search_path;
|
|
||||||
GRANT EXECUTE ON FUNCTION s.index_this_expr TO regress_minimal;
|
|
||||||
CREATE UNIQUE INDEX u2rows ON s.x USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops)
|
|
||||||
- TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE s.index_row_if(y);
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
-- Shall not find s.coll via search_path, despite the s.const->public.setter
|
|
||||||
-- call having set search_path=s during expression planning. Suppress the
|
|
||||||
@@ -80,9 +72,7 @@ ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree
|
|
||||||
\set VERBOSITY sqlstate
|
|
||||||
ALTER TABLE s.x ADD CONSTRAINT underqualified EXCLUDE USING btree
|
|
||||||
((s.index_this_expr(y, s.const())) COLLATE coll WITH s.=)
|
|
||||||
- USING INDEX TABLESPACE regress_create_idx_tblspace
|
|
||||||
WHERE (s.index_row_if(y));
|
|
||||||
\set VERBOSITY default
|
|
||||||
ROLLBACK;
|
|
||||||
|
|
||||||
-DROP TABLESPACE regress_create_idx_tblspace;
|
|
||||||
diff --git a/contrib/file_fdw/expected/file_fdw.out b/contrib/file_fdw/expected/file_fdw.out
|
|
||||||
index 86c148a..81bdb2c 100644
|
|
||||||
--- a/contrib/file_fdw/expected/file_fdw.out
|
|
||||||
+++ b/contrib/file_fdw/expected/file_fdw.out
|
|
||||||
@@ -4,6 +4,7 @@
|
|
||||||
-- directory paths are passed to us in environment variables
|
|
||||||
\getenv abs_srcdir PG_ABS_SRCDIR
|
|
||||||
-- Clean up in case a prior regression run failed
|
|
||||||
+SET compute_query_id TO 'off';
|
|
||||||
SET client_min_messages TO 'warning';
|
|
||||||
DROP ROLE IF EXISTS regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
|
|
||||||
RESET client_min_messages;
|
|
||||||
diff --git a/contrib/file_fdw/sql/file_fdw.sql b/contrib/file_fdw/sql/file_fdw.sql
|
|
||||||
index f0548e1..848a08c 100644
|
|
||||||
--- a/contrib/file_fdw/sql/file_fdw.sql
|
|
||||||
+++ b/contrib/file_fdw/sql/file_fdw.sql
|
|
||||||
@@ -6,6 +6,7 @@
|
|
||||||
\getenv abs_srcdir PG_ABS_SRCDIR
|
|
||||||
|
|
||||||
-- Clean up in case a prior regression run failed
|
|
||||||
+SET compute_query_id TO 'off';
|
|
||||||
SET client_min_messages TO 'warning';
|
|
||||||
DROP ROLE IF EXISTS regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
|
|
||||||
RESET client_min_messages;
|
|
||||||
@@ -1,19 +0,0 @@
commit ec6a491d126882966a696f9ad5d3698935361d55
Author: Alexey Masterov <alexeymasterov@neon.tech>
Date: Tue Dec 17 10:25:00 2024 +0100

Changes required to run tests on Neon

diff --git a/test/expected/permissions_functions.out b/test/expected/permissions_functions.out
index 1e9fbc2..94cbe25 100644
--- a/test/expected/permissions_functions.out
+++ b/test/expected/permissions_functions.out
@@ -64,7 +64,7 @@ begin;
select current_user;
current_user
--------------
- postgres
+ cloud_admin
(1 row)

-- revoke default access from the public role for new functions
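The expected-output change above reflects that on Neon the suite connects as cloud_admin rather than the postgres superuser. A hedged sketch of an equivalent runtime check; the connection handling and function name are illustrative, not part of the patch:

```rust
// Illustrative check only: confirm which role the tests actually run as.
async fn assert_runs_as_cloud_admin(client: &tokio_postgres::Client) -> anyhow::Result<()> {
    let row = client.query_one("SELECT current_user::text", &[]).await?;
    let user: String = row.get(0);
    anyhow::ensure!(user == "cloud_admin", "expected cloud_admin, got {user}");
    Ok(())
}
```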
@@ -1,24 +1,8 @@
diff --git a/Makefile b/Makefile
index 7a4b88c..56678af 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,10 @@ EXTVERSION = 0.8.0

MODULE_big = vector
DATA = $(wildcard sql/*--*--*.sql)
-DATA_built = sql/$(EXTENSION)--$(EXTVERSION).sql
+# This change is needed to install different per-version SQL files
+# like pgvector--0.8.0.sql and pgvector--0.7.4.sql
+# The corresponding file is downloaded during the Docker image build process
+DATA_built = sql/$(EXTENSION)--$(EXTVERSION).sql sql/vector--0.7.4.sql
OBJS = src/bitutils.o src/bitvec.o src/halfutils.o src/halfvec.o src/hnsw.o src/hnswbuild.o src/hnswinsert.o src/hnswscan.o src/hnswutils.o src/hnswvacuum.o src/ivfbuild.o src/ivfflat.o src/ivfinsert.o src/ivfkmeans.o src/ivfscan.o src/ivfutils.o src/ivfvacuum.o src/sparsevec.o src/vector.o
HEADERS = src/halfvec.h src/sparsevec.h src/vector.h

diff --git a/src/hnswbuild.c b/src/hnswbuild.c
index b667478..fc1897c 100644
index dcfb2bd..d5189ee 100644
--- a/src/hnswbuild.c
+++ b/src/hnswbuild.c
@@ -843,9 +843,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc)
@@ -860,9 +860,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc)

hnswarea = shm_toc_lookup(toc, PARALLEL_KEY_HNSW_AREA, false);

@@ -36,7 +20,7 @@ index b667478..fc1897c 100644
/* Close relations within worker */
index_close(indexRel, indexLockmode);
table_close(heapRel, heapLockmode);
@@ -1100,12 +1108,38 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo,
@@ -1117,12 +1125,38 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo,
SeedRandom(42);
#endif

@@ -27,10 +27,6 @@ commands:
user: nobody
sysvInitAction: respawn
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
- name: pgbouncer-exporter
user: postgres
sysvInitAction: respawn
shell: '/bin/pgbouncer_exporter --pgBouncer.connectionString="postgres:///pgbouncer?host=/tmp&port=6432&dbname=pgbouncer&user=pgbouncer"'
- name: sql-exporter
user: nobody
sysvInitAction: respawn
@@ -27,10 +27,6 @@ commands:
user: nobody
sysvInitAction: respawn
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
- name: pgbouncer-exporter
user: postgres
sysvInitAction: respawn
shell: '/bin/pgbouncer_exporter --pgBouncer.connectionString="postgres:///pgbouncer?host=/tmp&port=6432&dbname=pgbouncer&user=pgbouncer"'
- name: sql-exporter
user: nobody
sysvInitAction: respawn
@@ -7,7 +7,7 @@ license.workspace = true
[features]
default = []
# Enables test specific features.
testing = ["fail/failpoints"]
testing = []

[dependencies]
base64.workspace = true
@@ -15,15 +15,13 @@ aws-config.workspace = true
aws-sdk-s3.workspace = true
aws-sdk-kms.workspace = true
anyhow.workspace = true
axum = { workspace = true, features = [] }
camino.workspace = true
chrono.workspace = true
cfg-if.workspace = true
clap.workspace = true
fail.workspace = true
flate2.workspace = true
futures.workspace = true
http.workspace = true
hyper0 = { workspace = true, features = ["full"] }
metrics.workspace = true
nix.workspace = true
notify.workspace = true
@@ -38,8 +36,6 @@ serde_with.workspace = true
serde_json.workspace = true
signal-hook.workspace = true
tar.workspace = true
tower.workspace = true
tower-http.workspace = true
reqwest = { workspace = true, features = ["json"] }
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
tokio-postgres.workspace = true
@@ -51,7 +47,6 @@ tracing-subscriber.workspace = true
tracing-utils.workspace = true
thiserror.workspace = true
url.workspace = true
uuid.workspace = true
prometheus.workspace = true

postgres_initdb.workspace = true
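The `testing = ["fail/failpoints"]` feature and the `fail` dependency shown above exist so failpoints are compiled in for test builds only. A minimal sketch of the usual pattern, assuming the failpoint name and surrounding function for illustration:

```rust
use anyhow::Result;

// With `--features testing` the `fail/failpoints` feature is active, so this
// point can be armed at runtime (e.g. via the FAILPOINTS env var or
// `fail::cfg("compute-before-flush", "return")`); otherwise the macro is a no-op.
fn flush_state_to_disk() -> Result<()> {
    fail::fail_point!("compute-before-flush", |_| {
        anyhow::bail!("failpoint compute-before-flush triggered")
    });
    Ok(())
}
```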
|||||||
@@ -34,7 +34,6 @@
|
|||||||
//! -r http://pg-ext-s3-gateway \
|
//! -r http://pg-ext-s3-gateway \
|
||||||
//! ```
|
//! ```
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::ffi::OsString;
|
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::process::exit;
|
use std::process::exit;
|
||||||
@@ -45,7 +44,7 @@ use std::{thread, time::Duration};
|
|||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use clap::Parser;
|
use clap::Arg;
|
||||||
use compute_tools::disk_quota::set_disk_quota;
|
use compute_tools::disk_quota::set_disk_quota;
|
||||||
use compute_tools::lsn_lease::launch_lsn_lease_bg_task_for_static;
|
use compute_tools::lsn_lease::launch_lsn_lease_bg_task_for_static;
|
||||||
use signal_hook::consts::{SIGQUIT, SIGTERM};
|
use signal_hook::consts::{SIGQUIT, SIGTERM};
|
||||||
@@ -61,88 +60,20 @@ use compute_tools::compute::{
|
|||||||
};
|
};
|
||||||
use compute_tools::configurator::launch_configurator;
|
use compute_tools::configurator::launch_configurator;
|
||||||
use compute_tools::extension_server::get_pg_version_string;
|
use compute_tools::extension_server::get_pg_version_string;
|
||||||
use compute_tools::http::launch_http_server;
|
use compute_tools::http::api::launch_http_server;
|
||||||
use compute_tools::logger::*;
|
use compute_tools::logger::*;
|
||||||
use compute_tools::monitor::launch_monitor;
|
use compute_tools::monitor::launch_monitor;
|
||||||
use compute_tools::params::*;
|
use compute_tools::params::*;
|
||||||
use compute_tools::spec::*;
|
use compute_tools::spec::*;
|
||||||
use compute_tools::swap::resize_swap;
|
use compute_tools::swap::resize_swap;
|
||||||
use rlimit::{setrlimit, Resource};
|
use rlimit::{setrlimit, Resource};
|
||||||
use utils::failpoint_support;
|
|
||||||
|
|
||||||
// this is an arbitrary build tag. Fine as a default / for testing purposes
|
// this is an arbitrary build tag. Fine as a default / for testing purposes
|
||||||
// in-case of not-set environment var
|
// in-case of not-set environment var
|
||||||
const BUILD_TAG_DEFAULT: &str = "latest";
|
const BUILD_TAG_DEFAULT: &str = "latest";
|
||||||
|
|
||||||
// Compatibility hack: if the control plane specified any remote-ext-config
|
|
||||||
// use the default value for extension storage proxy gateway.
|
|
||||||
// Remove this once the control plane is updated to pass the gateway URL
|
|
||||||
fn parse_remote_ext_config(arg: &str) -> Result<String> {
|
|
||||||
if arg.starts_with("http") {
|
|
||||||
Ok(arg.trim_end_matches('/').to_string())
|
|
||||||
} else {
|
|
||||||
Ok("http://pg-ext-s3-gateway".to_string())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
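A small, illustrative test of the compatibility behaviour implemented by `parse_remote_ext_config` above; the test module placement is an assumption, not part of the diff:

```rust
#[cfg(test)]
mod remote_ext_config_tests {
    use super::parse_remote_ext_config;

    #[test]
    fn trims_trailing_slash_and_falls_back() {
        // A real URL is kept, minus any trailing slash...
        assert_eq!(
            parse_remote_ext_config("http://pg-ext-s3-gateway/").unwrap(),
            "http://pg-ext-s3-gateway"
        );
        // ...while any other value maps to the default gateway.
        assert_eq!(
            parse_remote_ext_config("legacy-value").unwrap(),
            "http://pg-ext-s3-gateway"
        );
    }
}
```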
|
|
||||||
#[derive(Parser)]
|
|
||||||
#[command(rename_all = "kebab-case")]
|
|
||||||
struct Cli {
|
|
||||||
#[arg(short = 'b', long, default_value = "postgres", env = "POSTGRES_PATH")]
|
|
||||||
pub pgbin: String,
|
|
||||||
|
|
||||||
#[arg(short = 'r', long, value_parser = parse_remote_ext_config)]
|
|
||||||
pub remote_ext_config: Option<String>,
|
|
||||||
|
|
||||||
#[arg(long, default_value_t = 3080)]
|
|
||||||
pub http_port: u16,
|
|
||||||
|
|
||||||
#[arg(short = 'D', long, value_name = "DATADIR")]
|
|
||||||
pub pgdata: String,
|
|
||||||
|
|
||||||
#[arg(short = 'C', long, value_name = "DATABASE_URL")]
|
|
||||||
pub connstr: String,
|
|
||||||
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
#[arg(long, default_value = "neon-postgres")]
|
|
||||||
pub cgroup: String,
|
|
||||||
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
#[arg(
|
|
||||||
long,
|
|
||||||
default_value = "host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable application_name=vm-monitor"
|
|
||||||
)]
|
|
||||||
pub filecache_connstr: String,
|
|
||||||
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
#[arg(long, default_value = "0.0.0.0:10301")]
|
|
||||||
pub vm_monitor_addr: String,
|
|
||||||
|
|
||||||
#[arg(long, action = clap::ArgAction::SetTrue)]
|
|
||||||
pub resize_swap_on_bind: bool,
|
|
||||||
|
|
||||||
#[arg(long)]
|
|
||||||
pub set_disk_quota_for_fs: Option<String>,
|
|
||||||
|
|
||||||
#[arg(short = 's', long = "spec", group = "spec")]
|
|
||||||
pub spec_json: Option<String>,
|
|
||||||
|
|
||||||
#[arg(short = 'S', long, group = "spec-path")]
|
|
||||||
pub spec_path: Option<OsString>,
|
|
||||||
|
|
||||||
#[arg(short = 'i', long, group = "compute-id", conflicts_with_all = ["spec", "spec-path"])]
|
|
||||||
pub compute_id: Option<String>,
|
|
||||||
|
|
||||||
#[arg(short = 'p', long, conflicts_with_all = ["spec", "spec-path"], requires = "compute-id", value_name = "CONTROL_PLANE_API_BASE_URL")]
|
|
||||||
pub control_plane_uri: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() -> Result<()> {
|
fn main() -> Result<()> {
|
||||||
let cli = Cli::parse();
|
let (build_tag, clap_args) = init()?;
|
||||||
|
|
||||||
let build_tag = init()?;
|
|
||||||
|
|
||||||
let scenario = failpoint_support::init();
|
|
||||||
|
|
||||||
// enable core dumping for all child processes
|
// enable core dumping for all child processes
|
||||||
setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?;
|
setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?;
|
||||||
@@ -151,11 +82,13 @@ fn main() -> Result<()> {
|
|||||||
// Enter startup tracing context
|
// Enter startup tracing context
|
||||||
let _startup_context_guard = startup_context_from_env();
|
let _startup_context_guard = startup_context_from_env();
|
||||||
|
|
||||||
let cli_spec = try_spec_from_cli(&cli)?;
|
let cli_args = process_cli(&clap_args)?;
|
||||||
|
|
||||||
let compute = wait_spec(build_tag, &cli, cli_spec)?;
|
let cli_spec = try_spec_from_cli(&clap_args, &cli_args)?;
|
||||||
|
|
||||||
start_postgres(&cli, compute)?
|
let wait_spec_result = wait_spec(build_tag, cli_args, cli_spec)?;
|
||||||
|
|
||||||
|
start_postgres(&clap_args, wait_spec_result)?
|
||||||
|
|
||||||
// Startup is finished, exit the startup tracing span
|
// Startup is finished, exit the startup tracing span
|
||||||
};
|
};
|
||||||
@@ -167,14 +100,17 @@ fn main() -> Result<()> {
|
|||||||
|
|
||||||
maybe_delay_exit(delay_exit);
|
maybe_delay_exit(delay_exit);
|
||||||
|
|
||||||
scenario.teardown();
|
|
||||||
|
|
||||||
deinit_and_exit(wait_pg_result);
|
deinit_and_exit(wait_pg_result);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn init() -> Result<String> {
|
fn init() -> Result<(String, clap::ArgMatches)> {
|
||||||
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
||||||
|
|
||||||
|
opentelemetry::global::set_error_handler(|err| {
|
||||||
|
tracing::info!("OpenTelemetry error: {err}");
|
||||||
|
})
|
||||||
|
.expect("global error handler lock poisoned");
|
||||||
|
|
||||||
let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
|
let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
|
||||||
thread::spawn(move || {
|
thread::spawn(move || {
|
||||||
for sig in signals.forever() {
|
for sig in signals.forever() {
|
||||||
@@ -187,7 +123,66 @@ fn init() -> Result<String> {
|
|||||||
.to_string();
|
.to_string();
|
||||||
info!("build_tag: {build_tag}");
|
info!("build_tag: {build_tag}");
|
||||||
|
|
||||||
Ok(build_tag)
|
Ok((build_tag, cli().get_matches()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn process_cli(matches: &clap::ArgMatches) -> Result<ProcessCliResult> {
|
||||||
|
let pgbin_default = "postgres";
|
||||||
|
let pgbin = matches
|
||||||
|
.get_one::<String>("pgbin")
|
||||||
|
.map(|s| s.as_str())
|
||||||
|
.unwrap_or(pgbin_default);
|
||||||
|
|
||||||
|
let ext_remote_storage = matches
|
||||||
|
.get_one::<String>("remote-ext-config")
|
||||||
|
// Compatibility hack: if the control plane specified any remote-ext-config
|
||||||
|
// use the default value for extension storage proxy gateway.
|
||||||
|
// Remove this once the control plane is updated to pass the gateway URL
|
||||||
|
.map(|conf| {
|
||||||
|
if conf.starts_with("http") {
|
||||||
|
conf.trim_end_matches('/')
|
||||||
|
} else {
|
||||||
|
"http://pg-ext-s3-gateway"
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let http_port = *matches
|
||||||
|
.get_one::<u16>("http-port")
|
||||||
|
.expect("http-port is required");
|
||||||
|
let pgdata = matches
|
||||||
|
.get_one::<String>("pgdata")
|
||||||
|
.expect("PGDATA path is required");
|
||||||
|
let connstr = matches
|
||||||
|
.get_one::<String>("connstr")
|
||||||
|
.expect("Postgres connection string is required");
|
||||||
|
let spec_json = matches.get_one::<String>("spec");
|
||||||
|
let spec_path = matches.get_one::<String>("spec-path");
|
||||||
|
let resize_swap_on_bind = matches.get_flag("resize-swap-on-bind");
|
||||||
|
let set_disk_quota_for_fs = matches.get_one::<String>("set-disk-quota-for-fs");
|
||||||
|
|
||||||
|
Ok(ProcessCliResult {
|
||||||
|
connstr,
|
||||||
|
pgdata,
|
||||||
|
pgbin,
|
||||||
|
ext_remote_storage,
|
||||||
|
http_port,
|
||||||
|
spec_json,
|
||||||
|
spec_path,
|
||||||
|
resize_swap_on_bind,
|
||||||
|
set_disk_quota_for_fs,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ProcessCliResult<'clap> {
|
||||||
|
connstr: &'clap str,
|
||||||
|
pgdata: &'clap str,
|
||||||
|
pgbin: &'clap str,
|
||||||
|
ext_remote_storage: Option<&'clap str>,
|
||||||
|
http_port: u16,
|
||||||
|
spec_json: Option<&'clap String>,
|
||||||
|
spec_path: Option<&'clap String>,
|
||||||
|
resize_swap_on_bind: bool,
|
||||||
|
set_disk_quota_for_fs: Option<&'clap String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn startup_context_from_env() -> Option<opentelemetry::ContextGuard> {
|
fn startup_context_from_env() -> Option<opentelemetry::ContextGuard> {
|
||||||
@@ -240,52 +235,58 @@ fn startup_context_from_env() -> Option<opentelemetry::ContextGuard> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn try_spec_from_cli(cli: &Cli) -> Result<CliSpecParams> {
|
fn try_spec_from_cli(
|
||||||
// First, try to get cluster spec from the cli argument
|
matches: &clap::ArgMatches,
|
||||||
if let Some(ref spec_json) = cli.spec_json {
|
ProcessCliResult {
|
||||||
info!("got spec from cli argument {}", spec_json);
|
spec_json,
|
||||||
return Ok(CliSpecParams {
|
spec_path,
|
||||||
spec: Some(serde_json::from_str(spec_json)?),
|
..
|
||||||
live_config_allowed: false,
|
}: &ProcessCliResult,
|
||||||
});
|
) -> Result<CliSpecParams> {
|
||||||
}
|
let compute_id = matches.get_one::<String>("compute-id");
|
||||||
|
let control_plane_uri = matches.get_one::<String>("control-plane-uri");
|
||||||
|
|
||||||
// Second, try to read it from the file if path is provided
|
let spec;
|
||||||
if let Some(ref spec_path) = cli.spec_path {
|
let mut live_config_allowed = false;
|
||||||
let file = File::open(Path::new(spec_path))?;
|
match spec_json {
|
||||||
return Ok(CliSpecParams {
|
// First, try to get cluster spec from the cli argument
|
||||||
spec: Some(serde_json::from_reader(file)?),
|
Some(json) => {
|
||||||
live_config_allowed: true,
|
info!("got spec from cli argument {}", json);
|
||||||
});
|
spec = Some(serde_json::from_str(json)?);
|
||||||
}
|
|
||||||
|
|
||||||
if cli.compute_id.is_none() {
|
|
||||||
panic!(
|
|
||||||
"compute spec should be provided by one of the following ways: \
|
|
||||||
--spec OR --spec-path OR --control-plane-uri and --compute-id"
|
|
||||||
);
|
|
||||||
};
|
|
||||||
if cli.control_plane_uri.is_none() {
|
|
||||||
panic!("must specify both --control-plane-uri and --compute-id or none");
|
|
||||||
};
|
|
||||||
|
|
||||||
match get_spec_from_control_plane(
|
|
||||||
cli.control_plane_uri.as_ref().unwrap(),
|
|
||||||
cli.compute_id.as_ref().unwrap(),
|
|
||||||
) {
|
|
||||||
Ok(spec) => Ok(CliSpecParams {
|
|
||||||
spec,
|
|
||||||
live_config_allowed: true,
|
|
||||||
}),
|
|
||||||
Err(e) => {
|
|
||||||
error!(
|
|
||||||
"cannot get response from control plane: {}\n\
|
|
||||||
neither spec nor confirmation that compute is in the Empty state was received",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
Err(e)
|
|
||||||
}
|
}
|
||||||
}
|
None => {
|
||||||
|
// Second, try to read it from the file if path is provided
|
||||||
|
if let Some(sp) = spec_path {
|
||||||
|
let path = Path::new(sp);
|
||||||
|
let file = File::open(path)?;
|
||||||
|
spec = Some(serde_json::from_reader(file)?);
|
||||||
|
live_config_allowed = true;
|
||||||
|
} else if let Some(id) = compute_id {
|
||||||
|
if let Some(cp_base) = control_plane_uri {
|
||||||
|
live_config_allowed = true;
|
||||||
|
spec = match get_spec_from_control_plane(cp_base, id) {
|
||||||
|
Ok(s) => s,
|
||||||
|
Err(e) => {
|
||||||
|
error!("cannot get response from control plane: {}", e);
|
||||||
|
panic!("neither spec nor confirmation that compute is in the Empty state was received");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
panic!("must specify both --control-plane-uri and --compute-id or none");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
panic!(
|
||||||
|
"compute spec should be provided by one of the following ways: \
|
||||||
|
--spec OR --spec-path OR --control-plane-uri and --compute-id"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(CliSpecParams {
|
||||||
|
spec,
|
||||||
|
live_config_allowed,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
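The branch above resolves the compute spec in a fixed order: an inline --spec JSON, then a --spec-path file, then the control plane via --compute-id and --control-plane-uri. A condensed sketch of that precedence with simplified types; the real code returns a ComputeSpec and calls get_spec_from_control_plane:

```rust
// Sketch only: returns (spec, live_config_allowed), mirroring the order above.
fn resolve_spec(
    spec_json: Option<&str>,
    spec_path: Option<&std::path::Path>,
    compute_id: Option<&str>,
    control_plane_uri: Option<&str>,
) -> anyhow::Result<(Option<serde_json::Value>, bool)> {
    if let Some(json) = spec_json {
        return Ok((Some(serde_json::from_str(json)?), false));
    }
    if let Some(path) = spec_path {
        let file = std::fs::File::open(path)?;
        return Ok((Some(serde_json::from_reader(file)?), true));
    }
    match (compute_id, control_plane_uri) {
        // The real code fetches the spec from the control plane here.
        (Some(_id), Some(_uri)) => Ok((None, true)),
        _ => anyhow::bail!(
            "compute spec should be provided by --spec, --spec-path, \
             or --control-plane-uri together with --compute-id"
        ),
    }
}
```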
struct CliSpecParams {
|
struct CliSpecParams {
|
||||||
@@ -296,12 +297,21 @@ struct CliSpecParams {
|
|||||||
|
|
||||||
fn wait_spec(
|
fn wait_spec(
|
||||||
build_tag: String,
|
build_tag: String,
|
||||||
cli: &Cli,
|
ProcessCliResult {
|
||||||
|
connstr,
|
||||||
|
pgdata,
|
||||||
|
pgbin,
|
||||||
|
ext_remote_storage,
|
||||||
|
resize_swap_on_bind,
|
||||||
|
set_disk_quota_for_fs,
|
||||||
|
http_port,
|
||||||
|
..
|
||||||
|
}: ProcessCliResult,
|
||||||
CliSpecParams {
|
CliSpecParams {
|
||||||
spec,
|
spec,
|
||||||
live_config_allowed,
|
live_config_allowed,
|
||||||
}: CliSpecParams,
|
}: CliSpecParams,
|
||||||
) -> Result<Arc<ComputeNode>> {
|
) -> Result<WaitSpecResult> {
|
||||||
let mut new_state = ComputeState::new();
|
let mut new_state = ComputeState::new();
|
||||||
let spec_set;
|
let spec_set;
|
||||||
|
|
||||||
@@ -313,7 +323,7 @@ fn wait_spec(
|
|||||||
} else {
|
} else {
|
||||||
spec_set = false;
|
spec_set = false;
|
||||||
}
|
}
|
||||||
let connstr = Url::parse(&cli.connstr).context("cannot parse connstr as a URL")?;
|
let connstr = Url::parse(connstr).context("cannot parse connstr as a URL")?;
|
||||||
let conn_conf = postgres::config::Config::from_str(connstr.as_str())
|
let conn_conf = postgres::config::Config::from_str(connstr.as_str())
|
||||||
.context("cannot build postgres config from connstr")?;
|
.context("cannot build postgres config from connstr")?;
|
||||||
let tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr.as_str())
|
let tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr.as_str())
|
||||||
@@ -322,14 +332,14 @@ fn wait_spec(
|
|||||||
connstr,
|
connstr,
|
||||||
conn_conf,
|
conn_conf,
|
||||||
tokio_conn_conf,
|
tokio_conn_conf,
|
||||||
pgdata: cli.pgdata.clone(),
|
pgdata: pgdata.to_string(),
|
||||||
pgbin: cli.pgbin.clone(),
|
pgbin: pgbin.to_string(),
|
||||||
pgversion: get_pg_version_string(&cli.pgbin),
|
pgversion: get_pg_version_string(pgbin),
|
||||||
http_port: cli.http_port,
|
http_port,
|
||||||
live_config_allowed,
|
live_config_allowed,
|
||||||
state: Mutex::new(new_state),
|
state: Mutex::new(new_state),
|
||||||
state_changed: Condvar::new(),
|
state_changed: Condvar::new(),
|
||||||
ext_remote_storage: cli.remote_ext_config.clone(),
|
ext_remote_storage: ext_remote_storage.map(|s| s.to_string()),
|
||||||
ext_download_progress: RwLock::new(HashMap::new()),
|
ext_download_progress: RwLock::new(HashMap::new()),
|
||||||
build_tag,
|
build_tag,
|
||||||
};
|
};
|
||||||
@@ -346,7 +356,7 @@ fn wait_spec(
|
|||||||
// Launch http service first, so that we can serve control-plane requests
|
// Launch http service first, so that we can serve control-plane requests
|
||||||
// while configuration is still in progress.
|
// while configuration is still in progress.
|
||||||
let _http_handle =
|
let _http_handle =
|
||||||
launch_http_server(cli.http_port, &compute).expect("cannot launch http endpoint thread");
|
launch_http_server(http_port, &compute).expect("cannot launch http endpoint thread");
|
||||||
|
|
||||||
if !spec_set {
|
if !spec_set {
|
||||||
// No spec provided, hang waiting for it.
|
// No spec provided, hang waiting for it.
|
||||||
@@ -378,12 +388,27 @@ fn wait_spec(
|
|||||||
|
|
||||||
launch_lsn_lease_bg_task_for_static(&compute);
|
launch_lsn_lease_bg_task_for_static(&compute);
|
||||||
|
|
||||||
Ok(compute)
|
Ok(WaitSpecResult {
|
||||||
|
compute,
|
||||||
|
resize_swap_on_bind,
|
||||||
|
set_disk_quota_for_fs: set_disk_quota_for_fs.cloned(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
struct WaitSpecResult {
|
||||||
|
compute: Arc<ComputeNode>,
|
||||||
|
resize_swap_on_bind: bool,
|
||||||
|
set_disk_quota_for_fs: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn start_postgres(
|
fn start_postgres(
|
||||||
cli: &Cli,
|
// need to allow unused because `matches` is only used if target_os = "linux"
|
||||||
compute: Arc<ComputeNode>,
|
#[allow(unused_variables)] matches: &clap::ArgMatches,
|
||||||
|
WaitSpecResult {
|
||||||
|
compute,
|
||||||
|
resize_swap_on_bind,
|
||||||
|
set_disk_quota_for_fs,
|
||||||
|
}: WaitSpecResult,
|
||||||
) -> Result<(Option<PostgresHandle>, StartPostgresResult)> {
|
) -> Result<(Option<PostgresHandle>, StartPostgresResult)> {
|
||||||
// We got all we need, update the state.
|
// We got all we need, update the state.
|
||||||
let mut state = compute.state.lock().unwrap();
|
let mut state = compute.state.lock().unwrap();
|
||||||
@@ -393,14 +418,9 @@ fn start_postgres(
|
|||||||
"running compute with features: {:?}",
|
"running compute with features: {:?}",
|
||||||
state.pspec.as_ref().unwrap().spec.features
|
state.pspec.as_ref().unwrap().spec.features
|
||||||
);
|
);
|
||||||
// before we release the mutex, fetch some parameters for later.
|
// before we release the mutex, fetch the swap size (if any) for later.
|
||||||
let &ComputeSpec {
|
let swap_size_bytes = state.pspec.as_ref().unwrap().spec.swap_size_bytes;
|
||||||
swap_size_bytes,
|
let disk_quota_bytes = state.pspec.as_ref().unwrap().spec.disk_quota_bytes;
|
||||||
disk_quota_bytes,
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
disable_lfc_resizing,
|
|
||||||
..
|
|
||||||
} = &state.pspec.as_ref().unwrap().spec;
|
|
||||||
drop(state);
|
drop(state);
|
||||||
|
|
||||||
// Launch remaining service threads
|
// Launch remaining service threads
|
||||||
@@ -411,7 +431,7 @@ fn start_postgres(
|
|||||||
let mut delay_exit = false;
|
let mut delay_exit = false;
|
||||||
|
|
||||||
// Resize swap to the desired size if the compute spec says so
|
// Resize swap to the desired size if the compute spec says so
|
||||||
if let (Some(size_bytes), true) = (swap_size_bytes, cli.resize_swap_on_bind) {
|
if let (Some(size_bytes), true) = (swap_size_bytes, resize_swap_on_bind) {
|
||||||
// To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
|
// To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
|
||||||
// *before* starting postgres.
|
// *before* starting postgres.
|
||||||
//
|
//
|
||||||
@@ -438,9 +458,9 @@ fn start_postgres(
|
|||||||
|
|
||||||
// Set disk quota if the compute spec says so
|
// Set disk quota if the compute spec says so
|
||||||
if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) =
|
if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) =
|
||||||
(disk_quota_bytes, cli.set_disk_quota_for_fs.as_ref())
|
(disk_quota_bytes, set_disk_quota_for_fs)
|
||||||
{
|
{
|
||||||
match set_disk_quota(disk_quota_bytes, disk_quota_fs_mountpoint) {
|
match set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint) {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
|
let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
|
||||||
info!(%disk_quota_bytes, %size_mib, "set disk quota");
|
info!(%disk_quota_bytes, %size_mib, "set disk quota");
|
||||||
@@ -462,10 +482,7 @@ fn start_postgres(
|
|||||||
let mut pg = None;
|
let mut pg = None;
|
||||||
if !prestartup_failed {
|
if !prestartup_failed {
|
||||||
pg = match compute.start_compute() {
|
pg = match compute.start_compute() {
|
||||||
Ok(pg) => {
|
Ok(pg) => Some(pg),
|
||||||
info!(postmaster_pid = %pg.0.id(), "Postgres was started");
|
|
||||||
Some(pg)
|
|
||||||
}
|
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
error!("could not start the compute node: {:#}", err);
|
error!("could not start the compute node: {:#}", err);
|
||||||
compute.set_failed_status(err);
|
compute.set_failed_status(err);
|
||||||
@@ -483,7 +500,13 @@ fn start_postgres(
|
|||||||
if #[cfg(target_os = "linux")] {
|
if #[cfg(target_os = "linux")] {
|
||||||
use std::env;
|
use std::env;
|
||||||
use tokio_util::sync::CancellationToken;
|
use tokio_util::sync::CancellationToken;
|
||||||
|
let vm_monitor_addr = matches
|
||||||
|
.get_one::<String>("vm-monitor-addr")
|
||||||
|
.expect("--vm-monitor-addr should always be set because it has a default arg");
|
||||||
|
let file_cache_connstr = matches.get_one::<String>("filecache-connstr");
|
||||||
|
let cgroup = matches.get_one::<String>("cgroup");
|
||||||
|
|
||||||
|
// Only make a runtime if we need to.
|
||||||
// Note: it seems like you can make a runtime in an inner scope and
|
// Note: it seems like you can make a runtime in an inner scope and
|
||||||
// if you start a task in it it won't be dropped. However, make it
|
// if you start a task in it it won't be dropped. However, make it
|
||||||
// in the outermost scope just to be safe.
|
// in the outermost scope just to be safe.
|
||||||
@@ -502,19 +525,12 @@ fn start_postgres(
|
|||||||
// This token is used internally by the monitor to clean up all threads
|
// This token is used internally by the monitor to clean up all threads
|
||||||
let token = CancellationToken::new();
|
let token = CancellationToken::new();
|
||||||
|
|
||||||
// don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
|
|
||||||
let pgconnstr = if disable_lfc_resizing.unwrap_or(false) {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(cli.filecache_connstr.clone())
|
|
||||||
};
|
|
||||||
|
|
||||||
let vm_monitor = rt.as_ref().map(|rt| {
|
let vm_monitor = rt.as_ref().map(|rt| {
|
||||||
rt.spawn(vm_monitor::start(
|
rt.spawn(vm_monitor::start(
|
||||||
Box::leak(Box::new(vm_monitor::Args {
|
Box::leak(Box::new(vm_monitor::Args {
|
||||||
cgroup: Some(cli.cgroup.clone()),
|
cgroup: cgroup.cloned(),
|
||||||
pgconnstr,
|
pgconnstr: file_cache_connstr.cloned(),
|
||||||
addr: cli.vm_monitor_addr.clone(),
|
addr: vm_monitor_addr.clone(),
|
||||||
})),
|
})),
|
||||||
token.clone(),
|
token.clone(),
|
||||||
))
|
))
|
||||||
@@ -557,8 +573,6 @@ fn wait_postgres(pg: Option<PostgresHandle>) -> Result<WaitPostgresResult> {
|
|||||||
// propagate to Postgres and it will be shut down as well.
|
// propagate to Postgres and it will be shut down as well.
|
||||||
let mut exit_code = None;
|
let mut exit_code = None;
|
||||||
if let Some((mut pg, logs_handle)) = pg {
|
if let Some((mut pg, logs_handle)) = pg {
|
||||||
info!(postmaster_pid = %pg.id(), "Waiting for Postgres to exit");
|
|
||||||
|
|
||||||
let ecode = pg
|
let ecode = pg
|
||||||
.wait()
|
.wait()
|
||||||
.expect("failed to start waiting on Postgres process");
|
.expect("failed to start waiting on Postgres process");
|
||||||
@@ -670,6 +684,105 @@ fn deinit_and_exit(WaitPostgresResult { exit_code }: WaitPostgresResult) -> ! {
|
|||||||
exit(exit_code.unwrap_or(1))
|
exit(exit_code.unwrap_or(1))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn cli() -> clap::Command {
|
||||||
|
// Env variable is set by `cargo`
|
||||||
|
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
|
||||||
|
clap::Command::new("compute_ctl")
|
||||||
|
.version(version)
|
||||||
|
.arg(
|
||||||
|
Arg::new("http-port")
|
||||||
|
.long("http-port")
|
||||||
|
.value_name("HTTP_PORT")
|
||||||
|
.default_value("3080")
|
||||||
|
.value_parser(clap::value_parser!(u16))
|
||||||
|
.required(false),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("connstr")
|
||||||
|
.short('C')
|
||||||
|
.long("connstr")
|
||||||
|
.value_name("DATABASE_URL")
|
||||||
|
.required(true),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("pgdata")
|
||||||
|
.short('D')
|
||||||
|
.long("pgdata")
|
||||||
|
.value_name("DATADIR")
|
||||||
|
.required(true),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("pgbin")
|
||||||
|
.short('b')
|
||||||
|
.long("pgbin")
|
||||||
|
.default_value("postgres")
|
||||||
|
.value_name("POSTGRES_PATH"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("spec")
|
||||||
|
.short('s')
|
||||||
|
.long("spec")
|
||||||
|
.value_name("SPEC_JSON"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("spec-path")
|
||||||
|
.short('S')
|
||||||
|
.long("spec-path")
|
||||||
|
.value_name("SPEC_PATH"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("compute-id")
|
||||||
|
.short('i')
|
||||||
|
.long("compute-id")
|
||||||
|
.value_name("COMPUTE_ID"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("control-plane-uri")
|
||||||
|
.short('p')
|
||||||
|
.long("control-plane-uri")
|
||||||
|
.value_name("CONTROL_PLANE_API_BASE_URI"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("remote-ext-config")
|
||||||
|
.short('r')
|
||||||
|
.long("remote-ext-config")
|
||||||
|
.value_name("REMOTE_EXT_CONFIG"),
|
||||||
|
)
|
||||||
|
// TODO(fprasx): we currently have default arguments because the cloud PR
|
||||||
|
// to pass them in hasn't been merged yet. We should get rid of them once
|
||||||
|
// the PR is merged.
|
||||||
|
.arg(
|
||||||
|
Arg::new("vm-monitor-addr")
|
||||||
|
.long("vm-monitor-addr")
|
||||||
|
.default_value("0.0.0.0:10301")
|
||||||
|
.value_name("VM_MONITOR_ADDR"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("cgroup")
|
||||||
|
.long("cgroup")
|
||||||
|
.default_value("neon-postgres")
|
||||||
|
.value_name("CGROUP"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("filecache-connstr")
|
||||||
|
.long("filecache-connstr")
|
||||||
|
.default_value(
|
||||||
|
"host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable application_name=vm-monitor",
|
||||||
|
)
|
||||||
|
.value_name("FILECACHE_CONNSTR"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("resize-swap-on-bind")
|
||||||
|
.long("resize-swap-on-bind")
|
||||||
|
.action(clap::ArgAction::SetTrue),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("set-disk-quota-for-fs")
|
||||||
|
.long("set-disk-quota-for-fs")
|
||||||
|
.value_name("SET_DISK_QUOTA_FOR_FS")
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
/// When compute_ctl is killed, send also termination signal to sync-safekeepers
|
/// When compute_ctl is killed, send also termination signal to sync-safekeepers
|
||||||
/// to prevent leakage. TODO: it is better to convert compute_ctl to async and
|
/// to prevent leakage. TODO: it is better to convert compute_ctl to async and
|
||||||
/// wait for termination which would be easy then.
|
/// wait for termination which would be easy then.
|
||||||
@@ -679,14 +792,7 @@ fn handle_exit_signal(sig: i32) {
|
|||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[test]
|
||||||
mod test {
|
fn verify_cli() {
|
||||||
use clap::CommandFactory;
|
cli().debug_assert()
|
||||||
|
|
||||||
use super::Cli;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn verify_cli() {
|
|
||||||
Cli::command().debug_assert()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
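A hedged usage sketch for the builder-style `cli()` defined above, exercising the same parsing that `verify_cli` checks; the argument values are invented for illustration:

```rust
#[test]
fn parses_minimal_invocation() {
    let matches = cli()
        .try_get_matches_from([
            "compute_ctl",
            "--pgdata", "/var/db/postgres/compute",
            "--connstr", "postgresql://cloud_admin@localhost:5432/postgres",
        ])
        .expect("required arguments should parse");
    // `http-port` falls back to the default declared on the Arg.
    assert_eq!(matches.get_one::<u16>("http-port").copied(), Some(3080));
}
```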
|
|||||||
@@ -17,7 +17,7 @@
//!
//! # Local Testing
//!
//! - Comment out most of the pgxns in compute-node.Dockerfile to speed up the build.
//! - Comment out most of the pgxns in The Dockerfile.compute-tools to speed up the build.
//! - Build the image with the following command:
//!
//! ```bash
@@ -31,35 +31,26 @@ use camino::{Utf8Path, Utf8PathBuf};
|
|||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use compute_tools::extension_server::{get_pg_version, PostgresMajorVersion};
|
use compute_tools::extension_server::{get_pg_version, PostgresMajorVersion};
|
||||||
use nix::unistd::Pid;
|
use nix::unistd::Pid;
|
||||||
use tracing::{error, info, info_span, warn, Instrument};
|
use tracing::{info, info_span, warn, Instrument};
|
||||||
use utils::fs_ext::is_directory_empty;
|
use utils::fs_ext::is_directory_empty;
|
||||||
|
|
||||||
#[path = "fast_import/aws_s3_sync.rs"]
|
|
||||||
mod aws_s3_sync;
|
|
||||||
#[path = "fast_import/child_stdio_to_log.rs"]
|
#[path = "fast_import/child_stdio_to_log.rs"]
|
||||||
mod child_stdio_to_log;
|
mod child_stdio_to_log;
|
||||||
#[path = "fast_import/s3_uri.rs"]
|
#[path = "fast_import/s3_uri.rs"]
|
||||||
mod s3_uri;
|
mod s3_uri;
|
||||||
|
#[path = "fast_import/s5cmd.rs"]
|
||||||
const PG_WAIT_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(600);
|
mod s5cmd;
|
||||||
const PG_WAIT_RETRY_INTERVAL: std::time::Duration = std::time::Duration::from_millis(300);
|
|
||||||
|
|
||||||
#[derive(clap::Parser)]
|
#[derive(clap::Parser)]
|
||||||
struct Args {
|
struct Args {
|
||||||
#[clap(long)]
|
#[clap(long)]
|
||||||
working_directory: Utf8PathBuf,
|
working_directory: Utf8PathBuf,
|
||||||
#[clap(long, env = "NEON_IMPORTER_S3_PREFIX")]
|
#[clap(long, env = "NEON_IMPORTER_S3_PREFIX")]
|
||||||
s3_prefix: Option<s3_uri::S3Uri>,
|
s3_prefix: s3_uri::S3Uri,
|
||||||
#[clap(long)]
|
|
||||||
source_connection_string: Option<String>,
|
|
||||||
#[clap(short, long)]
|
|
||||||
interactive: bool,
|
|
||||||
#[clap(long)]
|
#[clap(long)]
|
||||||
pg_bin_dir: Utf8PathBuf,
|
pg_bin_dir: Utf8PathBuf,
|
||||||
#[clap(long)]
|
#[clap(long)]
|
||||||
pg_lib_dir: Utf8PathBuf,
|
pg_lib_dir: Utf8PathBuf,
|
||||||
#[clap(long)]
|
|
||||||
pg_port: Option<u16>, // port to run postgres on, 5432 is default
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[serde_with::serde_as]
|
#[serde_with::serde_as]
|
||||||
@@ -76,13 +67,6 @@ enum EncryptionSecret {
|
|||||||
KMS { key_id: String },
|
KMS { key_id: String },
|
||||||
}
|
}
|
||||||
|
|
||||||
// copied from pageserver_api::config::defaults::DEFAULT_LOCALE to avoid dependency just for a constant
|
|
||||||
const DEFAULT_LOCALE: &str = if cfg!(target_os = "macos") {
|
|
||||||
"C"
|
|
||||||
} else {
|
|
||||||
"C.UTF-8"
|
|
||||||
};
|
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
pub(crate) async fn main() -> anyhow::Result<()> {
|
pub(crate) async fn main() -> anyhow::Result<()> {
|
||||||
utils::logging::init(
|
utils::logging::init(
|
||||||
@@ -93,74 +77,30 @@ pub(crate) async fn main() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
info!("starting");
|
info!("starting");
|
||||||
|
|
||||||
let args = Args::parse();
|
let Args {
|
||||||
|
working_directory,
|
||||||
|
s3_prefix,
|
||||||
|
pg_bin_dir,
|
||||||
|
pg_lib_dir,
|
||||||
|
} = Args::parse();
|
||||||
|
|
||||||
// Validate arguments
|
let aws_config = aws_config::load_defaults(BehaviorVersion::v2024_03_28()).await;
|
||||||
if args.s3_prefix.is_none() && args.source_connection_string.is_none() {
|
|
||||||
anyhow::bail!("either s3_prefix or source_connection_string must be specified");
|
|
||||||
}
|
|
||||||
if args.s3_prefix.is_some() && args.source_connection_string.is_some() {
|
|
||||||
anyhow::bail!("only one of s3_prefix or source_connection_string can be specified");
|
|
||||||
}
|
|
||||||
|
|
||||||
let working_directory = args.working_directory;
|
let spec: Spec = {
|
||||||
let pg_bin_dir = args.pg_bin_dir;
|
let spec_key = s3_prefix.append("/spec.json");
|
||||||
let pg_lib_dir = args.pg_lib_dir;
|
let s3_client = aws_sdk_s3::Client::new(&aws_config);
|
||||||
let pg_port = args.pg_port.unwrap_or_else(|| {
|
let object = s3_client
|
||||||
info!("pg_port not specified, using default 5432");
|
.get_object()
|
||||||
5432
|
.bucket(&spec_key.bucket)
|
||||||
});
|
.key(spec_key.key)
|
||||||
|
.send()
|
||||||
// Initialize AWS clients only if s3_prefix is specified
|
.await
|
||||||
let (aws_config, kms_client) = if args.s3_prefix.is_some() {
|
.context("get spec from s3")?
|
||||||
let config = aws_config::load_defaults(BehaviorVersion::v2024_03_28()).await;
|
.body
|
||||||
let kms = aws_sdk_kms::Client::new(&config);
|
.collect()
|
||||||
(Some(config), Some(kms))
|
.await
|
||||||
} else {
|
.context("download spec body")?;
|
||||||
(None, None)
|
serde_json::from_slice(&object.into_bytes()).context("parse spec as json")?
|
||||||
};
|
|
||||||
|
|
||||||
// Get source connection string either from S3 spec or direct argument
|
|
||||||
let source_connection_string = if let Some(s3_prefix) = &args.s3_prefix {
|
|
||||||
let spec: Spec = {
|
|
||||||
let spec_key = s3_prefix.append("/spec.json");
|
|
||||||
let s3_client = aws_sdk_s3::Client::new(aws_config.as_ref().unwrap());
|
|
||||||
let object = s3_client
|
|
||||||
.get_object()
|
|
||||||
.bucket(&spec_key.bucket)
|
|
||||||
.key(spec_key.key)
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.context("get spec from s3")?
|
|
||||||
.body
|
|
||||||
.collect()
|
|
||||||
.await
|
|
||||||
.context("download spec body")?;
|
|
||||||
serde_json::from_slice(&object.into_bytes()).context("parse spec as json")?
|
|
||||||
};
|
|
||||||
|
|
||||||
match spec.encryption_secret {
|
|
||||||
EncryptionSecret::KMS { key_id } => {
|
|
||||||
let mut output = kms_client
|
|
||||||
.unwrap()
|
|
||||||
.decrypt()
|
|
||||||
.key_id(key_id)
|
|
||||||
.ciphertext_blob(aws_sdk_s3::primitives::Blob::new(
|
|
||||||
spec.source_connstring_ciphertext_base64,
|
|
||||||
))
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.context("decrypt source connection string")?;
|
|
||||||
let plaintext = output
|
|
||||||
.plaintext
|
|
||||||
.take()
|
|
||||||
.context("get plaintext source connection string")?;
|
|
||||||
String::from_utf8(plaintext.into_inner())
|
|
||||||
.context("parse source connection string as utf8")?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
args.source_connection_string.unwrap()
|
|
||||||
};
|
};
|
||||||
|
|
||||||
match tokio::fs::create_dir(&working_directory).await {
|
match tokio::fs::create_dir(&working_directory).await {
|
||||||
@@ -183,6 +123,15 @@ pub(crate) async fn main() -> anyhow::Result<()> {
     .await
     .context("create pgdata directory")?;
 
+    //
+    // Setup clients
+    //
+    let aws_config = aws_config::load_defaults(BehaviorVersion::v2024_03_28()).await;
+    let kms_client = aws_sdk_kms::Client::new(&aws_config);
+
+    //
+    // Initialize pgdata
+    //
     let pgbin = pg_bin_dir.join("postgres");
     let pg_version = match get_pg_version(pgbin.as_ref()) {
         PostgresMajorVersion::V14 => 14,
@@ -193,7 +142,7 @@ pub(crate) async fn main() -> anyhow::Result<()> {
     let superuser = "cloud_admin"; // XXX: this shouldn't be hard-coded
     postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs {
         superuser,
-        locale: DEFAULT_LOCALE, // XXX: this shouldn't be hard-coded,
+        locale: "en_US.UTF-8", // XXX: this shouldn't be hard-coded,
         pg_version,
         initdb_bin: pg_bin_dir.join("initdb").as_ref(),
         library_search_path: &pg_lib_dir, // TODO: is this right? Prob works in compute image, not sure about neon_local.
@@ -210,7 +159,6 @@ pub(crate) async fn main() -> anyhow::Result<()> {
     let mut postgres_proc = tokio::process::Command::new(pgbin)
         .arg("-D")
         .arg(&pgdata_dir)
-        .args(["-p", &format!("{pg_port}")])
         .args(["-c", "wal_level=minimal"])
         .args(["-c", "shared_buffers=10GB"])
         .args(["-c", "max_wal_senders=0"])
@@ -222,15 +170,8 @@ pub(crate) async fn main() -> anyhow::Result<()> {
         .args(["-c", &format!("max_parallel_workers={nproc}")])
         .args(["-c", &format!("max_parallel_workers_per_gather={nproc}")])
         .args(["-c", &format!("max_worker_processes={nproc}")])
-        .args([
-            "-c",
-            &format!(
-                "effective_io_concurrency={}",
-                if cfg!(target_os = "macos") { 0 } else { 100 }
-            ),
-        ])
+        .args(["-c", "effective_io_concurrency=100"])
         .env_clear()
-        .env("LD_LIBRARY_PATH", &pg_lib_dir)
         .stdout(std::process::Stdio::piped())
         .stderr(std::process::Stdio::piped())
         .spawn()
@@ -244,58 +185,44 @@ pub(crate) async fn main() -> anyhow::Result<()> {
|
|||||||
)
|
)
|
||||||
.instrument(info_span!("postgres")),
|
.instrument(info_span!("postgres")),
|
||||||
);
|
);
|
||||||
|
|
||||||
// Create neondb database in the running postgres
|
|
||||||
let restore_pg_connstring =
|
let restore_pg_connstring =
|
||||||
format!("host=localhost port={pg_port} user={superuser} dbname=postgres");
|
format!("host=localhost port=5432 user={superuser} dbname=postgres");
|
||||||
|
|
||||||
let start_time = std::time::Instant::now();
|
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
if start_time.elapsed() > PG_WAIT_TIMEOUT {
|
let res = tokio_postgres::connect(&restore_pg_connstring, tokio_postgres::NoTls).await;
|
||||||
error!(
|
if res.is_ok() {
|
||||||
"timeout exceeded: failed to poll postgres and create database within 10 minutes"
|
info!("postgres is ready, could connect to it");
|
||||||
);
|
break;
|
||||||
std::process::exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
match tokio_postgres::connect(&restore_pg_connstring, tokio_postgres::NoTls).await {
|
|
||||||
Ok((client, connection)) => {
|
|
||||||
// Spawn the connection handling task to maintain the connection
|
|
||||||
tokio::spawn(async move {
|
|
||||||
if let Err(e) = connection.await {
|
|
||||||
warn!("connection error: {}", e);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
match client.simple_query("CREATE DATABASE neondb;").await {
|
|
||||||
Ok(_) => {
|
|
||||||
info!("created neondb database");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
warn!(
|
|
||||||
"failed to create database: {}, retying in {}s",
|
|
||||||
e,
|
|
||||||
PG_WAIT_RETRY_INTERVAL.as_secs_f32()
|
|
||||||
);
|
|
||||||
tokio::time::sleep(PG_WAIT_RETRY_INTERVAL).await;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(_) => {
|
|
||||||
info!(
|
|
||||||
"postgres not ready yet, retrying in {}s",
|
|
||||||
PG_WAIT_RETRY_INTERVAL.as_secs_f32()
|
|
||||||
);
|
|
||||||
tokio::time::sleep(PG_WAIT_RETRY_INTERVAL).await;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let restore_pg_connstring = restore_pg_connstring.replace("dbname=postgres", "dbname=neondb");
|
//
|
||||||
|
// Decrypt connection string
|
||||||
|
//
|
||||||
|
let source_connection_string = {
|
||||||
|
match spec.encryption_secret {
|
||||||
|
EncryptionSecret::KMS { key_id } => {
|
||||||
|
let mut output = kms_client
|
||||||
|
.decrypt()
|
||||||
|
.key_id(key_id)
|
||||||
|
.ciphertext_blob(aws_sdk_s3::primitives::Blob::new(
|
||||||
|
spec.source_connstring_ciphertext_base64,
|
||||||
|
))
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.context("decrypt source connection string")?;
|
||||||
|
let plaintext = output
|
||||||
|
.plaintext
|
||||||
|
.take()
|
||||||
|
.context("get plaintext source connection string")?;
|
||||||
|
String::from_utf8(plaintext.into_inner())
|
||||||
|
.context("parse source connection string as utf8")?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
//
|
||||||
|
// Start the work
|
||||||
|
//
|
||||||
|
|
||||||
let dumpdir = working_directory.join("dumpdir");
|
let dumpdir = working_directory.join("dumpdir");
|
||||||
|
|
||||||
@@ -329,7 +256,6 @@ pub(crate) async fn main() -> anyhow::Result<()> {
         .arg(&source_connection_string)
         // how we run it
         .env_clear()
-        .env("LD_LIBRARY_PATH", &pg_lib_dir)
         .kill_on_drop(true)
         .stdout(std::process::Stdio::piped())
         .stderr(std::process::Stdio::piped())
@@ -363,7 +289,6 @@ pub(crate) async fn main() -> anyhow::Result<()> {
         .arg(&dumpdir)
         // how we run it
         .env_clear()
-        .env("LD_LIBRARY_PATH", &pg_lib_dir)
         .kill_on_drop(true)
         .stdout(std::process::Stdio::piped())
         .stderr(std::process::Stdio::piped())
@@ -385,12 +310,6 @@ pub(crate) async fn main() -> anyhow::Result<()> {
         }
     }
 
-    // If interactive mode, wait for Ctrl+C
-    if args.interactive {
-        info!("Running in interactive mode. Press Ctrl+C to shut down.");
-        tokio::signal::ctrl_c().await.context("wait for ctrl-c")?;
-    }
-
     info!("shutdown postgres");
     {
         nix::sys::signal::kill(
@@ -406,24 +325,21 @@ pub(crate) async fn main() -> anyhow::Result<()> {
             .context("wait for postgres to shut down")?;
     }
 
-    // Only sync if s3_prefix was specified
-    if let Some(s3_prefix) = args.s3_prefix {
-        info!("upload pgdata");
-        aws_s3_sync::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/pgdata/"))
-            .await
-            .context("sync dump directory to destination")?;
+    info!("upload pgdata");
+    s5cmd::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/"))
+        .await
+        .context("sync dump directory to destination")?;
 
     info!("write status");
     {
         let status_dir = working_directory.join("status");
         std::fs::create_dir(&status_dir).context("create status directory")?;
-        let status_file = status_dir.join("pgdata");
+        let status_file = status_dir.join("status");
         std::fs::write(&status_file, serde_json::json!({"done": true}).to_string())
             .context("write status file")?;
-        aws_s3_sync::sync(&status_dir, &s3_prefix.append("/status/"))
+        s5cmd::sync(&status_file, &s3_prefix.append("/status/pgdata"))
             .await
             .context("sync status directory to destination")?;
-    }
     }
 
     Ok(())
@@ -4,21 +4,24 @@ use camino::Utf8Path;
 use super::s3_uri::S3Uri;
 
 pub(crate) async fn sync(local: &Utf8Path, remote: &S3Uri) -> anyhow::Result<()> {
-    let mut builder = tokio::process::Command::new("aws");
+    let mut builder = tokio::process::Command::new("s5cmd");
+    // s5cmd uses aws-sdk-go v1, hence doesn't support AWS_ENDPOINT_URL
+    if let Some(val) = std::env::var_os("AWS_ENDPOINT_URL") {
+        builder.arg("--endpoint-url").arg(val);
+    }
     builder
-        .arg("s3")
         .arg("sync")
         .arg(local.as_str())
         .arg(remote.to_string());
     let st = builder
         .spawn()
-        .context("spawn aws s3 sync")?
+        .context("spawn s5cmd")?
         .wait()
         .await
-        .context("wait for aws s3 sync")?;
+        .context("wait for s5cmd")?;
     if st.success() {
         Ok(())
     } else {
-        Err(anyhow::anyhow!("aws s3 sync failed"))
+        Err(anyhow::anyhow!("s5cmd failed"))
     }
 }
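(Illustrative note, not part of the diff: a sketch of how a caller might drive the s5cmd-backed sync helper above. The local path is a placeholder, and the prefix would come from the parsed --s3-prefix argument.)

// Sketch only: mirrors the pgdata upload step elsewhere in this compare.
async fn upload_pgdata_example(prefix: &S3Uri) -> anyhow::Result<()> {
    // AWS_ENDPOINT_URL, if set, is forwarded via --endpoint-url as shown above.
    sync(Utf8Path::new("/tmp/pgdata"), &prefix.append("/pgdata/")).await
}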
@@ -36,11 +36,11 @@ pub async fn get_dbs_and_roles(compute: &Arc<ComputeNode>) -> anyhow::Result<Cat
 
 #[derive(Debug, thiserror::Error)]
 pub enum SchemaDumpError {
-    #[error("database does not exist")]
+    #[error("Database does not exist.")]
     DatabaseDoesNotExist,
-    #[error("failed to execute pg_dump")]
+    #[error("Failed to execute pg_dump.")]
     IO(#[from] std::io::Error),
-    #[error("unexpected I/O error")]
+    #[error("Unexpected error.")]
     Unexpected,
 }
 
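(Illustrative note, not part of the diff: a sketch of how these error variants are distinguished by a caller, mirroring the /database_schema handling that appears later in this compare. The status codes and the function shape here are assumptions.)

// Sketch only: map SchemaDumpError variants to an HTTP-style status.
async fn dump_schema_example(compute: &Arc<ComputeNode>, db: &str) -> (u16, String) {
    match get_database_schema(compute, db).await {
        Ok(_stream) => (200, "schema dump streaming".to_string()),
        Err(SchemaDumpError::DatabaseDoesNotExist) => (404, "database does not exist".to_string()),
        Err(e) => (500, format!("can't get schema dump: {e}")),
    }
}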
@@ -15,7 +15,7 @@ use std::time::Instant;
 
 use anyhow::{Context, Result};
 use chrono::{DateTime, Utc};
-use compute_api::spec::{Database, PgIdent, Role};
+use compute_api::spec::{PgIdent, Role};
 use futures::future::join_all;
 use futures::stream::FuturesUnordered;
 use futures::StreamExt;
@@ -41,14 +41,12 @@ use crate::local_proxy;
 use crate::pg_helpers::*;
 use crate::spec::*;
 use crate::spec_apply::ApplySpecPhase::{
-    CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateSchemaNeon,
-    CreateSuperUser, DropInvalidDatabases, DropRoles, FinalizeDropLogicalSubscriptions,
-    HandleNeonExtension, HandleOtherExtensions, RenameAndDeleteDatabases, RenameRoles,
-    RunInEachDatabase,
+    CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateSuperUser,
+    DropInvalidDatabases, DropRoles, HandleNeonExtension, HandleOtherExtensions,
+    RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
 };
-use crate::spec_apply::PerDatabasePhase;
 use crate::spec_apply::PerDatabasePhase::{
-    ChangeSchemaPerms, DeleteDBRoleReferences, DropLogicalSubscriptions, HandleAnonExtension,
+    ChangeSchemaPerms, DeleteDBRoleReferences, HandleAnonExtension,
 };
 use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
 use crate::sync_sk::{check_if_synced, ping_safekeeper};
@@ -340,15 +338,6 @@ impl ComputeNode {
         self.state.lock().unwrap().status
     }
 
-    pub fn get_timeline_id(&self) -> Option<TimelineId> {
-        self.state
-            .lock()
-            .unwrap()
-            .pspec
-            .as_ref()
-            .map(|s| s.timeline_id)
-    }
-
     // Remove `pgdata` directory and create it again with right permissions.
     fn create_pgdata(&self) -> Result<()> {
         // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
@@ -845,7 +834,7 @@ impl ComputeNode {
         conf
     }
 
-    pub async fn get_maintenance_client(
+    async fn get_maintenance_client(
         conf: &tokio_postgres::Config,
     ) -> Result<tokio_postgres::Client> {
         let mut conf = conf.clone();
@@ -938,48 +927,6 @@ impl ComputeNode {
|
|||||||
.map(|role| (role.name.clone(), role))
|
.map(|role| (role.name.clone(), role))
|
||||||
.collect::<HashMap<String, Role>>();
|
.collect::<HashMap<String, Role>>();
|
||||||
|
|
||||||
// Check if we need to drop subscriptions before starting the endpoint.
|
|
||||||
//
|
|
||||||
// It is important to do this operation exactly once when endpoint starts on a new branch.
|
|
||||||
// Otherwise, we may drop not inherited, but newly created subscriptions.
|
|
||||||
//
|
|
||||||
// We cannot rely only on spec.drop_subscriptions_before_start flag,
|
|
||||||
// because if for some reason compute restarts inside VM,
|
|
||||||
// it will start again with the same spec and flag value.
|
|
||||||
//
|
|
||||||
// To handle this, we save the fact of the operation in the database
|
|
||||||
// in the neon.drop_subscriptions_done table.
|
|
||||||
// If the table does not exist, we assume that the operation was never performed, so we must do it.
|
|
||||||
// If table exists, we check if the operation was performed on the current timelilne.
|
|
||||||
//
|
|
||||||
let mut drop_subscriptions_done = false;
|
|
||||||
|
|
||||||
if spec.drop_subscriptions_before_start {
|
|
||||||
let timeline_id = self.get_timeline_id().context("timeline_id must be set")?;
|
|
||||||
let query = format!("select 1 from neon.drop_subscriptions_done where timeline_id = '{}'", timeline_id);
|
|
||||||
|
|
||||||
info!("Checking if drop subscription operation was already performed for timeline_id: {}", timeline_id);
|
|
||||||
|
|
||||||
drop_subscriptions_done = match
|
|
||||||
client.simple_query(&query).await {
|
|
||||||
Ok(result) => {
|
|
||||||
matches!(&result[0], postgres::SimpleQueryMessage::Row(_))
|
|
||||||
},
|
|
||||||
Err(e) =>
|
|
||||||
{
|
|
||||||
match e.code() {
|
|
||||||
Some(&SqlState::UNDEFINED_TABLE) => false,
|
|
||||||
_ => {
|
|
||||||
// We don't expect any other error here, except for the schema/table not existing
|
|
||||||
error!("Error checking if drop subscription operation was already performed: {}", e);
|
|
||||||
return Err(e.into());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
let jwks_roles = Arc::new(
|
let jwks_roles = Arc::new(
|
||||||
spec.as_ref()
|
spec.as_ref()
|
||||||
.local_proxy_config
|
.local_proxy_config
|
||||||
@@ -996,78 +943,6 @@ impl ComputeNode {
|
|||||||
dbs: databases,
|
dbs: databases,
|
||||||
}));
|
}));
|
||||||
|
|
||||||
// Apply special pre drop database phase.
|
|
||||||
// NOTE: we use the code of RunInEachDatabase phase for parallelism
|
|
||||||
// and connection management, but we don't really run it in *each* database,
|
|
||||||
// only in databases, we're about to drop.
|
|
||||||
info!("Applying PerDatabase (pre-dropdb) phase");
|
|
||||||
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
|
|
||||||
|
|
||||||
// Run the phase for each database that we're about to drop.
|
|
||||||
let db_processes = spec
|
|
||||||
.delta_operations
|
|
||||||
.iter()
|
|
||||||
.flatten()
|
|
||||||
.filter_map(move |op| {
|
|
||||||
if op.action.as_str() == "delete_db" {
|
|
||||||
Some(op.name.clone())
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.map(|dbname| {
|
|
||||||
let spec = spec.clone();
|
|
||||||
let ctx = ctx.clone();
|
|
||||||
let jwks_roles = jwks_roles.clone();
|
|
||||||
let mut conf = conf.as_ref().clone();
|
|
||||||
let concurrency_token = concurrency_token.clone();
|
|
||||||
// We only need dbname field for this phase, so set other fields to dummy values
|
|
||||||
let db = DB::UserDB(Database {
|
|
||||||
name: dbname.clone(),
|
|
||||||
owner: "cloud_admin".to_string(),
|
|
||||||
options: None,
|
|
||||||
restrict_conn: false,
|
|
||||||
invalid: false,
|
|
||||||
});
|
|
||||||
|
|
||||||
debug!("Applying per-database phases for Database {:?}", &db);
|
|
||||||
|
|
||||||
match &db {
|
|
||||||
DB::SystemDB => {}
|
|
||||||
DB::UserDB(db) => {
|
|
||||||
conf.dbname(db.name.as_str());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let conf = Arc::new(conf);
|
|
||||||
let fut = Self::apply_spec_sql_db(
|
|
||||||
spec.clone(),
|
|
||||||
conf,
|
|
||||||
ctx.clone(),
|
|
||||||
jwks_roles.clone(),
|
|
||||||
concurrency_token.clone(),
|
|
||||||
db,
|
|
||||||
[DropLogicalSubscriptions].to_vec(),
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(spawn(fut))
|
|
||||||
})
|
|
||||||
.collect::<Vec<Result<_, anyhow::Error>>>();
|
|
||||||
|
|
||||||
for process in db_processes.into_iter() {
|
|
||||||
let handle = process?;
|
|
||||||
if let Err(e) = handle.await? {
|
|
||||||
// Handle the error case where the database does not exist
|
|
||||||
// We do not check whether the DB exists or not in the deletion phase,
|
|
||||||
// so we shouldn't be strict about it in pre-deletion cleanup as well.
|
|
||||||
if e.to_string().contains("does not exist") {
|
|
||||||
warn!("Error dropping subscription: {}", e);
|
|
||||||
} else {
|
|
||||||
return Err(e);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
for phase in [
|
for phase in [
|
||||||
CreateSuperUser,
|
CreateSuperUser,
|
||||||
DropInvalidDatabases,
|
DropInvalidDatabases,
|
||||||
@@ -1075,7 +950,6 @@ impl ComputeNode {
|
|||||||
CreateAndAlterRoles,
|
CreateAndAlterRoles,
|
||||||
RenameAndDeleteDatabases,
|
RenameAndDeleteDatabases,
|
||||||
CreateAndAlterDatabases,
|
CreateAndAlterDatabases,
|
||||||
CreateSchemaNeon,
|
|
||||||
] {
|
] {
|
||||||
info!("Applying phase {:?}", &phase);
|
info!("Applying phase {:?}", &phase);
|
||||||
apply_operations(
|
apply_operations(
|
||||||
@@ -1088,7 +962,7 @@ impl ComputeNode {
|
|||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("Applying RunInEachDatabase2 phase");
|
info!("Applying RunInEachDatabase phase");
|
||||||
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
|
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
|
||||||
|
|
||||||
let db_processes = spec
|
let db_processes = spec
|
||||||
@@ -1116,17 +990,6 @@ impl ComputeNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let conf = Arc::new(conf);
|
let conf = Arc::new(conf);
|
||||||
let mut phases = vec![
|
|
||||||
DeleteDBRoleReferences,
|
|
||||||
ChangeSchemaPerms,
|
|
||||||
HandleAnonExtension,
|
|
||||||
];
|
|
||||||
|
|
||||||
if spec.drop_subscriptions_before_start && !drop_subscriptions_done {
|
|
||||||
info!("Adding DropLogicalSubscriptions phase because drop_subscriptions_before_start is set");
|
|
||||||
phases.push(DropLogicalSubscriptions);
|
|
||||||
}
|
|
||||||
|
|
||||||
let fut = Self::apply_spec_sql_db(
|
let fut = Self::apply_spec_sql_db(
|
||||||
spec.clone(),
|
spec.clone(),
|
||||||
conf,
|
conf,
|
||||||
@@ -1134,7 +997,6 @@ impl ComputeNode {
|
|||||||
jwks_roles.clone(),
|
jwks_roles.clone(),
|
||||||
concurrency_token.clone(),
|
concurrency_token.clone(),
|
||||||
db,
|
db,
|
||||||
phases,
|
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(spawn(fut))
|
Ok(spawn(fut))
|
||||||
@@ -1146,20 +1008,12 @@ impl ComputeNode {
|
|||||||
handle.await??;
|
handle.await??;
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut phases = vec![
|
for phase in vec![
|
||||||
HandleOtherExtensions,
|
HandleOtherExtensions,
|
||||||
HandleNeonExtension, // This step depends on CreateSchemaNeon
|
HandleNeonExtension,
|
||||||
CreateAvailabilityCheck,
|
CreateAvailabilityCheck,
|
||||||
DropRoles,
|
DropRoles,
|
||||||
];
|
] {
|
||||||
|
|
||||||
// This step depends on CreateSchemaNeon
|
|
||||||
if spec.drop_subscriptions_before_start && !drop_subscriptions_done {
|
|
||||||
info!("Adding FinalizeDropLogicalSubscriptions phase because drop_subscriptions_before_start is set");
|
|
||||||
phases.push(FinalizeDropLogicalSubscriptions);
|
|
||||||
}
|
|
||||||
|
|
||||||
for phase in phases {
|
|
||||||
debug!("Applying phase {:?}", &phase);
|
debug!("Applying phase {:?}", &phase);
|
||||||
apply_operations(
|
apply_operations(
|
||||||
spec.clone(),
|
spec.clone(),
|
||||||
@@ -1189,13 +1043,16 @@ impl ComputeNode {
|
|||||||
jwks_roles: Arc<HashSet<String>>,
|
jwks_roles: Arc<HashSet<String>>,
|
||||||
concurrency_token: Arc<tokio::sync::Semaphore>,
|
concurrency_token: Arc<tokio::sync::Semaphore>,
|
||||||
db: DB,
|
db: DB,
|
||||||
subphases: Vec<PerDatabasePhase>,
|
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let _permit = concurrency_token.acquire().await?;
|
let _permit = concurrency_token.acquire().await?;
|
||||||
|
|
||||||
let mut client_conn = None;
|
let mut client_conn = None;
|
||||||
|
|
||||||
for subphase in subphases {
|
for subphase in [
|
||||||
|
DeleteDBRoleReferences,
|
||||||
|
ChangeSchemaPerms,
|
||||||
|
HandleAnonExtension,
|
||||||
|
] {
|
||||||
apply_operations(
|
apply_operations(
|
||||||
spec.clone(),
|
spec.clone(),
|
||||||
ctx.clone(),
|
ctx.clone(),
|
||||||
@@ -1324,19 +1181,8 @@ impl ComputeNode {
|
|||||||
let mut conf = postgres::config::Config::from(conf);
|
let mut conf = postgres::config::Config::from(conf);
|
||||||
conf.application_name("compute_ctl:migrations");
|
conf.application_name("compute_ctl:migrations");
|
||||||
|
|
||||||
match conf.connect(NoTls) {
|
let mut client = conf.connect(NoTls)?;
|
||||||
Ok(mut client) => {
|
handle_migrations(&mut client).context("apply_config handle_migrations")
|
||||||
if let Err(e) = handle_migrations(&mut client) {
|
|
||||||
error!("Failed to run migrations: {}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
error!(
|
|
||||||
"Failed to connect to the compute for running migrations: {}",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
});
|
});
|
||||||
|
|
||||||
Ok::<(), anyhow::Error>(())
|
Ok::<(), anyhow::Error>(())
|
||||||
@@ -1529,14 +1375,6 @@ impl ComputeNode {
|
|||||||
Ok(())
|
Ok(())
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let postgresql_conf_path = pgdata_path.join("postgresql.conf");
|
|
||||||
if config::line_in_file(
|
|
||||||
&postgresql_conf_path,
|
|
||||||
"neon.disable_logical_replication_subscribers=false",
|
|
||||||
)? {
|
|
||||||
info!("updated postgresql.conf to set neon.disable_logical_replication_subscribers=false");
|
|
||||||
}
|
|
||||||
self.pg_reload_conf()?;
|
self.pg_reload_conf()?;
|
||||||
}
|
}
|
||||||
self.post_apply_config()?;
|
self.post_apply_config()?;
|
||||||
|
|||||||
@@ -129,13 +129,6 @@ pub fn write_postgres_conf(
 
     writeln!(file, "neon.extension_server_port={}", extension_server_port)?;
 
-    if spec.drop_subscriptions_before_start {
-        writeln!(file, "neon.disable_logical_replication_subscribers=true")?;
-    } else {
-        // be explicit about the default value
-        writeln!(file, "neon.disable_logical_replication_subscribers=false")?;
-    }
-
     // This is essential to keep this line at the end of the file,
     // because it is intended to override any settings above.
     writeln!(file, "include_if_exists = 'compute_ctl_temp_override.conf'")?;
@@ -85,8 +85,6 @@ use tracing::info;
 use tracing::log::warn;
 use zstd::stream::read::Decoder;
 
-use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS};
-
 fn get_pg_config(argument: &str, pgbin: &str) -> String {
     // gives the result of `pg_config [argument]`
     // where argument is a flag like `--version` or `--sharedir`
@@ -258,60 +256,23 @@ pub fn create_control_files(remote_extensions: &RemoteExtSpec, pgbin: &str) {
 async fn download_extension_tar(ext_remote_storage: &str, ext_path: &str) -> Result<Bytes> {
     let uri = format!("{}/{}", ext_remote_storage, ext_path);
 
-    info!("Download extension {} from uri {}", ext_path, uri);
+    info!("Download extension {:?} from uri {:?}", ext_path, uri);
 
-    match do_extension_server_request(&uri).await {
-        Ok(resp) => {
-            info!("Successfully downloaded remote extension data {}", ext_path);
-            REMOTE_EXT_REQUESTS_TOTAL
-                .with_label_values(&[&StatusCode::OK.to_string()])
-                .inc();
-            Ok(resp)
-        }
-        Err((msg, status)) => {
-            REMOTE_EXT_REQUESTS_TOTAL
-                .with_label_values(&[&status])
-                .inc();
-            bail!(msg);
-        }
-    }
-}
-
-// Do a single remote extensions server request.
-// Return result or (error message + stringified status code) in case of any failures.
-async fn do_extension_server_request(uri: &str) -> Result<Bytes, (String, String)> {
-    let resp = reqwest::get(uri).await.map_err(|e| {
-        (
-            format!(
-                "could not perform remote extensions server request: {:?}",
-                e
-            ),
-            UNKNOWN_HTTP_STATUS.to_string(),
-        )
-    })?;
-    let status = resp.status();
-
-    match status {
+    let resp = reqwest::get(uri).await?;
+
+    match resp.status() {
         StatusCode::OK => match resp.bytes().await {
-            Ok(resp) => Ok(resp),
-            Err(e) => Err((
-                format!("could not read remote extensions server response: {:?}", e),
-                // It's fine to return and report error with status as 200 OK,
-                // because we still failed to read the response.
-                status.to_string(),
-            )),
+            Ok(resp) => {
+                info!("Download extension {:?} completed successfully", ext_path);
+                Ok(resp)
+            }
+            Err(e) => bail!("could not deserialize remote extension response: {}", e),
         },
-        StatusCode::SERVICE_UNAVAILABLE => Err((
-            "remote extensions server is temporarily unavailable".to_string(),
-            status.to_string(),
-        )),
-        _ => Err((
-            format!(
-                "unexpected remote extensions server response status code: {}",
-                status
-            ),
-            status.to_string(),
-        )),
+        StatusCode::SERVICE_UNAVAILABLE => bail!("remote extension is temporarily unavailable"),
+        _ => bail!(
+            "unexpected remote extension response status code: {}",
+            resp.status()
+        ),
     }
 }
 
591
compute_tools/src/http/api.rs
Normal file
@@ -0,0 +1,591 @@
|
|||||||
|
use std::convert::Infallible;
|
||||||
|
use std::net::IpAddr;
|
||||||
|
use std::net::Ipv6Addr;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::thread;
|
||||||
|
|
||||||
|
use crate::catalog::SchemaDumpError;
|
||||||
|
use crate::catalog::{get_database_schema, get_dbs_and_roles};
|
||||||
|
use crate::compute::forward_termination_signal;
|
||||||
|
use crate::compute::{ComputeNode, ComputeState, ParsedSpec};
|
||||||
|
use crate::installed_extensions;
|
||||||
|
use compute_api::requests::{ConfigurationRequest, ExtensionInstallRequest, SetRoleGrantsRequest};
|
||||||
|
use compute_api::responses::{
|
||||||
|
ComputeStatus, ComputeStatusResponse, ExtensionInstallResult, GenericAPIError,
|
||||||
|
SetRoleGrantsResponse,
|
||||||
|
};
|
||||||
|
|
||||||
|
use anyhow::Result;
|
||||||
|
use hyper::header::CONTENT_TYPE;
|
||||||
|
use hyper::service::{make_service_fn, service_fn};
|
||||||
|
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
||||||
|
use metrics::proto::MetricFamily;
|
||||||
|
use metrics::Encoder;
|
||||||
|
use metrics::TextEncoder;
|
||||||
|
use tokio::task;
|
||||||
|
use tracing::{debug, error, info, warn};
|
||||||
|
use tracing_utils::http::OtelName;
|
||||||
|
use utils::http::request::must_get_query_param;
|
||||||
|
|
||||||
|
fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse {
|
||||||
|
ComputeStatusResponse {
|
||||||
|
start_time: state.start_time,
|
||||||
|
tenant: state
|
||||||
|
.pspec
|
||||||
|
.as_ref()
|
||||||
|
.map(|pspec| pspec.tenant_id.to_string()),
|
||||||
|
timeline: state
|
||||||
|
.pspec
|
||||||
|
.as_ref()
|
||||||
|
.map(|pspec| pspec.timeline_id.to_string()),
|
||||||
|
status: state.status,
|
||||||
|
last_active: state.last_active,
|
||||||
|
error: state.error.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Service function to handle all available routes.
|
||||||
|
async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
|
||||||
|
//
|
||||||
|
// NOTE: The URI path is currently included in traces. That's OK because
|
||||||
|
// it doesn't contain any variable parts or sensitive information. But
|
||||||
|
// please keep that in mind if you change the routing here.
|
||||||
|
//
|
||||||
|
match (req.method(), req.uri().path()) {
|
||||||
|
// Serialized compute state.
|
||||||
|
(&Method::GET, "/status") => {
|
||||||
|
debug!("serving /status GET request");
|
||||||
|
let state = compute.state.lock().unwrap();
|
||||||
|
let status_response = status_response_from_state(&state);
|
||||||
|
Response::new(Body::from(serde_json::to_string(&status_response).unwrap()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Startup metrics in JSON format. Keep /metrics reserved for a possible
|
||||||
|
// future use for Prometheus metrics format.
|
||||||
|
(&Method::GET, "/metrics.json") => {
|
||||||
|
info!("serving /metrics.json GET request");
|
||||||
|
let metrics = compute.state.lock().unwrap().metrics.clone();
|
||||||
|
Response::new(Body::from(serde_json::to_string(&metrics).unwrap()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prometheus metrics
|
||||||
|
(&Method::GET, "/metrics") => {
|
||||||
|
debug!("serving /metrics GET request");
|
||||||
|
|
||||||
|
// When we call TextEncoder::encode() below, it will immediately
|
||||||
|
// return an error if a metric family has no metrics, so we need to
|
||||||
|
// preemptively filter out metric families with no metrics.
|
||||||
|
let metrics = installed_extensions::collect()
|
||||||
|
.into_iter()
|
||||||
|
.filter(|m| !m.get_metric().is_empty())
|
||||||
|
.collect::<Vec<MetricFamily>>();
|
||||||
|
|
||||||
|
let encoder = TextEncoder::new();
|
||||||
|
let mut buffer = vec![];
|
||||||
|
|
||||||
|
if let Err(err) = encoder.encode(&metrics, &mut buffer) {
|
||||||
|
let msg = format!("error handling /metrics request: {err}");
|
||||||
|
error!(msg);
|
||||||
|
return render_json_error(&msg, StatusCode::INTERNAL_SERVER_ERROR);
|
||||||
|
}
|
||||||
|
|
||||||
|
match Response::builder()
|
||||||
|
.status(StatusCode::OK)
|
||||||
|
.header(CONTENT_TYPE, encoder.format_type())
|
||||||
|
.body(Body::from(buffer))
|
||||||
|
{
|
||||||
|
Ok(response) => response,
|
||||||
|
Err(err) => {
|
||||||
|
let msg = format!("error handling /metrics request: {err}");
|
||||||
|
error!(msg);
|
||||||
|
render_json_error(&msg, StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Collect Postgres current usage insights
|
||||||
|
(&Method::GET, "/insights") => {
|
||||||
|
info!("serving /insights GET request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!("compute is not running, current status: {:?}", status);
|
||||||
|
error!(msg);
|
||||||
|
return Response::new(Body::from(msg));
|
||||||
|
}
|
||||||
|
|
||||||
|
let insights = compute.collect_insights().await;
|
||||||
|
Response::new(Body::from(insights))
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::POST, "/check_writability") => {
|
||||||
|
info!("serving /check_writability POST request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for check_writability request: {:?}",
|
||||||
|
status
|
||||||
|
);
|
||||||
|
error!(msg);
|
||||||
|
return Response::new(Body::from(msg));
|
||||||
|
}
|
||||||
|
|
||||||
|
let res = crate::checker::check_writability(compute).await;
|
||||||
|
match res {
|
||||||
|
Ok(_) => Response::new(Body::from("true")),
|
||||||
|
Err(e) => {
|
||||||
|
error!("check_writability failed: {}", e);
|
||||||
|
Response::new(Body::from(e.to_string()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::POST, "/extensions") => {
|
||||||
|
info!("serving /extensions POST request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for extensions request: {:?}",
|
||||||
|
status
|
||||||
|
);
|
||||||
|
error!(msg);
|
||||||
|
return render_json_error(&msg, StatusCode::PRECONDITION_FAILED);
|
||||||
|
}
|
||||||
|
|
||||||
|
let request = hyper::body::to_bytes(req.into_body()).await.unwrap();
|
||||||
|
let request = serde_json::from_slice::<ExtensionInstallRequest>(&request).unwrap();
|
||||||
|
let res = compute
|
||||||
|
.install_extension(&request.extension, &request.database, request.version)
|
||||||
|
.await;
|
||||||
|
match res {
|
||||||
|
Ok(version) => render_json(Body::from(
|
||||||
|
serde_json::to_string(&ExtensionInstallResult {
|
||||||
|
extension: request.extension,
|
||||||
|
version,
|
||||||
|
})
|
||||||
|
.unwrap(),
|
||||||
|
)),
|
||||||
|
Err(e) => {
|
||||||
|
error!("install_extension failed: {}", e);
|
||||||
|
render_json_error(&e.to_string(), StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::GET, "/info") => {
|
||||||
|
let num_cpus = num_cpus::get_physical();
|
||||||
|
info!("serving /info GET request. num_cpus: {}", num_cpus);
|
||||||
|
Response::new(Body::from(
|
||||||
|
serde_json::json!({
|
||||||
|
"num_cpus": num_cpus,
|
||||||
|
})
|
||||||
|
.to_string(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accept spec in JSON format and request compute configuration. If
|
||||||
|
// anything goes wrong after we set the compute status to `ConfigurationPending`
|
||||||
|
// and update compute state with new spec, we basically leave compute
|
||||||
|
// in the potentially wrong state. That said, it's control-plane's
|
||||||
|
// responsibility to watch compute state after reconfiguration request
|
||||||
|
// and to clean restart in case of errors.
|
||||||
|
(&Method::POST, "/configure") => {
|
||||||
|
info!("serving /configure POST request");
|
||||||
|
match handle_configure_request(req, compute).await {
|
||||||
|
Ok(msg) => Response::new(Body::from(msg)),
|
||||||
|
Err((msg, code)) => {
|
||||||
|
error!("error handling /configure request: {msg}");
|
||||||
|
render_json_error(&msg, code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::POST, "/terminate") => {
|
||||||
|
info!("serving /terminate POST request");
|
||||||
|
match handle_terminate_request(compute).await {
|
||||||
|
Ok(()) => Response::new(Body::empty()),
|
||||||
|
Err((msg, code)) => {
|
||||||
|
error!("error handling /terminate request: {msg}");
|
||||||
|
render_json_error(&msg, code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::GET, "/dbs_and_roles") => {
|
||||||
|
info!("serving /dbs_and_roles GET request",);
|
||||||
|
match get_dbs_and_roles(compute).await {
|
||||||
|
Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())),
|
||||||
|
Err(_) => {
|
||||||
|
render_json_error("can't get dbs and roles", StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::GET, "/database_schema") => {
|
||||||
|
let database = match must_get_query_param(&req, "database") {
|
||||||
|
Err(e) => return e.into_response(),
|
||||||
|
Ok(database) => database,
|
||||||
|
};
|
||||||
|
info!("serving /database_schema GET request with database: {database}",);
|
||||||
|
match get_database_schema(compute, &database).await {
|
||||||
|
Ok(res) => render_plain(Body::wrap_stream(res)),
|
||||||
|
Err(SchemaDumpError::DatabaseDoesNotExist) => {
|
||||||
|
render_json_error("database does not exist", StatusCode::NOT_FOUND)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("can't get schema dump: {}", e);
|
||||||
|
render_json_error("can't get schema dump", StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::POST, "/grants") => {
|
||||||
|
info!("serving /grants POST request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for set_role_grants request: {:?}",
|
||||||
|
status
|
||||||
|
);
|
||||||
|
error!(msg);
|
||||||
|
return render_json_error(&msg, StatusCode::PRECONDITION_FAILED);
|
||||||
|
}
|
||||||
|
|
||||||
|
let request = hyper::body::to_bytes(req.into_body()).await.unwrap();
|
||||||
|
let request = serde_json::from_slice::<SetRoleGrantsRequest>(&request).unwrap();
|
||||||
|
|
||||||
|
let res = compute
|
||||||
|
.set_role_grants(
|
||||||
|
&request.database,
|
||||||
|
&request.schema,
|
||||||
|
&request.privileges,
|
||||||
|
&request.role,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
match res {
|
||||||
|
Ok(()) => render_json(Body::from(
|
||||||
|
serde_json::to_string(&SetRoleGrantsResponse {
|
||||||
|
database: request.database,
|
||||||
|
schema: request.schema,
|
||||||
|
role: request.role,
|
||||||
|
privileges: request.privileges,
|
||||||
|
})
|
||||||
|
.unwrap(),
|
||||||
|
)),
|
||||||
|
Err(e) => render_json_error(
|
||||||
|
&format!("could not grant role privileges to the schema: {e}"),
|
||||||
|
// TODO: can we filter on role/schema not found errors
|
||||||
|
// and return appropriate error code?
|
||||||
|
StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// get the list of installed extensions
|
||||||
|
// currently only used in python tests
|
||||||
|
// TODO: call it from cplane
|
||||||
|
(&Method::GET, "/installed_extensions") => {
|
||||||
|
info!("serving /installed_extensions GET request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for extensions request: {:?}",
|
||||||
|
status
|
||||||
|
);
|
||||||
|
error!(msg);
|
||||||
|
return Response::new(Body::from(msg));
|
||||||
|
}
|
||||||
|
|
||||||
|
let conf = compute.get_conn_conf(None);
|
||||||
|
let res =
|
||||||
|
task::spawn_blocking(move || installed_extensions::get_installed_extensions(conf))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
match res {
|
||||||
|
Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())),
|
||||||
|
Err(e) => render_json_error(
|
||||||
|
&format!("could not get list of installed extensions: {}", e),
|
||||||
|
StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// download extension files from remote extension storage on demand
|
||||||
|
(&Method::POST, route) if route.starts_with("/extension_server/") => {
|
||||||
|
info!("serving {:?} POST request", route);
|
||||||
|
info!("req.uri {:?}", req.uri());
|
||||||
|
|
||||||
|
// don't even try to download extensions
|
||||||
|
// if no remote storage is configured
|
||||||
|
if compute.ext_remote_storage.is_none() {
|
||||||
|
info!("no extensions remote storage configured");
|
||||||
|
let mut resp = Response::new(Body::from("no remote storage configured"));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut is_library = false;
|
||||||
|
if let Some(params) = req.uri().query() {
|
||||||
|
info!("serving {:?} POST request with params: {}", route, params);
|
||||||
|
if params == "is_library=true" {
|
||||||
|
is_library = true;
|
||||||
|
} else {
|
||||||
|
let mut resp = Response::new(Body::from("Wrong request parameters"));
|
||||||
|
*resp.status_mut() = StatusCode::BAD_REQUEST;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let filename = route.split('/').last().unwrap().to_string();
|
||||||
|
info!("serving /extension_server POST request, filename: {filename:?} is_library: {is_library}");
|
||||||
|
|
||||||
|
// get ext_name and path from spec
|
||||||
|
// don't lock compute_state for too long
|
||||||
|
let ext = {
|
||||||
|
let compute_state = compute.state.lock().unwrap();
|
||||||
|
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
|
||||||
|
let spec = &pspec.spec;
|
||||||
|
|
||||||
|
// debug only
|
||||||
|
info!("spec: {:?}", spec);
|
||||||
|
|
||||||
|
let remote_extensions = match spec.remote_extensions.as_ref() {
|
||||||
|
Some(r) => r,
|
||||||
|
None => {
|
||||||
|
info!("no remote extensions spec was provided");
|
||||||
|
let mut resp = Response::new(Body::from("no remote storage configured"));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
remote_extensions.get_ext(
|
||||||
|
&filename,
|
||||||
|
is_library,
|
||||||
|
&compute.build_tag,
|
||||||
|
&compute.pgversion,
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
match ext {
|
||||||
|
Ok((ext_name, ext_path)) => {
|
||||||
|
match compute.download_extension(ext_name, ext_path).await {
|
||||||
|
Ok(_) => Response::new(Body::from("OK")),
|
||||||
|
Err(e) => {
|
||||||
|
error!("extension download failed: {}", e);
|
||||||
|
let mut resp = Response::new(Body::from(e.to_string()));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("extension download failed to find extension: {}", e);
|
||||||
|
let mut resp = Response::new(Body::from("failed to find file"));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the `404 Not Found` for any other routes.
|
||||||
|
_ => {
|
||||||
|
let mut not_found = Response::new(Body::from("404 Not Found"));
|
||||||
|
*not_found.status_mut() = StatusCode::NOT_FOUND;
|
||||||
|
not_found
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_configure_request(
|
||||||
|
req: Request<Body>,
|
||||||
|
compute: &Arc<ComputeNode>,
|
||||||
|
) -> Result<String, (String, StatusCode)> {
|
||||||
|
if !compute.live_config_allowed {
|
||||||
|
return Err((
|
||||||
|
"live configuration is not allowed for this compute node".to_string(),
|
||||||
|
StatusCode::PRECONDITION_FAILED,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let body_bytes = hyper::body::to_bytes(req.into_body()).await.unwrap();
|
||||||
|
let spec_raw = String::from_utf8(body_bytes.to_vec()).unwrap();
|
||||||
|
if let Ok(request) = serde_json::from_str::<ConfigurationRequest>(&spec_raw) {
|
||||||
|
let spec = request.spec;
|
||||||
|
|
||||||
|
let parsed_spec = match ParsedSpec::try_from(spec) {
|
||||||
|
Ok(ps) => ps,
|
||||||
|
Err(msg) => return Err((msg, StatusCode::BAD_REQUEST)),
|
||||||
|
};
|
||||||
|
|
||||||
|
// XXX: wrap state update under lock in code blocks. Otherwise,
|
||||||
|
// we will try to `Send` `mut state` into the spawned thread
|
||||||
|
// bellow, which will cause error:
|
        // ```
        // error: future cannot be sent between threads safely
        // ```
        {
            let mut state = compute.state.lock().unwrap();
            if state.status != ComputeStatus::Empty && state.status != ComputeStatus::Running {
                let msg = format!(
                    "invalid compute status for configuration request: {:?}",
                    state.status.clone()
                );
                return Err((msg, StatusCode::PRECONDITION_FAILED));
            }
            state.pspec = Some(parsed_spec);
            state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed);
            drop(state);
            info!("set new spec and notified waiters");
        }

        // Spawn a blocking thread to wait for compute to become Running.
        // This is needed to do not block the main pool of workers and
        // be able to serve other requests while some particular request
        // is waiting for compute to finish configuration.
        let c = compute.clone();
        task::spawn_blocking(move || {
            let mut state = c.state.lock().unwrap();
            while state.status != ComputeStatus::Running {
                state = c.state_changed.wait(state).unwrap();
                info!(
                    "waiting for compute to become Running, current status: {:?}",
                    state.status
                );

                if state.status == ComputeStatus::Failed {
                    let err = state.error.as_ref().map_or("unknown error", |x| x);
                    let msg = format!("compute configuration failed: {:?}", err);
                    return Err((msg, StatusCode::INTERNAL_SERVER_ERROR));
                }
            }

            Ok(())
        })
        .await
        .unwrap()?;

        // Return current compute state if everything went well.
        let state = compute.state.lock().unwrap().clone();
        let status_response = status_response_from_state(&state);
        Ok(serde_json::to_string(&status_response).unwrap())
    } else {
        Err(("invalid spec".to_string(), StatusCode::BAD_REQUEST))
    }
}

fn render_json_error(e: &str, status: StatusCode) -> Response<Body> {
    let error = GenericAPIError {
        error: e.to_string(),
    };
    Response::builder()
        .status(status)
        .header(CONTENT_TYPE, "application/json")
        .body(Body::from(serde_json::to_string(&error).unwrap()))
        .unwrap()
}

fn render_json(body: Body) -> Response<Body> {
    Response::builder()
        .header(CONTENT_TYPE, "application/json")
        .body(body)
        .unwrap()
}

fn render_plain(body: Body) -> Response<Body> {
    Response::builder()
        .header(CONTENT_TYPE, "text/plain")
        .body(body)
        .unwrap()
}

async fn handle_terminate_request(compute: &Arc<ComputeNode>) -> Result<(), (String, StatusCode)> {
    {
        let mut state = compute.state.lock().unwrap();
        if state.status == ComputeStatus::Terminated {
            return Ok(());
        }
        if state.status != ComputeStatus::Empty && state.status != ComputeStatus::Running {
            let msg = format!(
                "invalid compute status for termination request: {}",
                state.status
            );
            return Err((msg, StatusCode::PRECONDITION_FAILED));
        }
        state.set_status(ComputeStatus::TerminationPending, &compute.state_changed);
        drop(state);
    }

    forward_termination_signal();
    info!("sent signal and notified waiters");

    // Spawn a blocking thread to wait for compute to become Terminated.
    // This is needed to do not block the main pool of workers and
    // be able to serve other requests while some particular request
    // is waiting for compute to finish configuration.
    let c = compute.clone();
    task::spawn_blocking(move || {
        let mut state = c.state.lock().unwrap();
        while state.status != ComputeStatus::Terminated {
            state = c.state_changed.wait(state).unwrap();
            info!(
                "waiting for compute to become {}, current status: {:?}",
                ComputeStatus::Terminated,
                state.status
            );
        }

        Ok(())
    })
    .await
    .unwrap()?;
    info!("terminated Postgres");
    Ok(())
}

// Main Hyper HTTP server function that runs it and blocks waiting on it forever.
#[tokio::main]
async fn serve(port: u16, state: Arc<ComputeNode>) {
    // this usually binds to both IPv4 and IPv6 on linux
    // see e.g. https://github.com/rust-lang/rust/pull/34440
    let addr = SocketAddr::new(IpAddr::from(Ipv6Addr::UNSPECIFIED), port);

    let make_service = make_service_fn(move |_conn| {
        let state = state.clone();
        async move {
            Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
                let state = state.clone();
                async move {
                    Ok::<_, Infallible>(
                        // NOTE: We include the URI path in the string. It
                        // doesn't contain any variable parts or sensitive
                        // information in this API.
                        tracing_utils::http::tracing_handler(
                            req,
                            |req| routes(req, &state),
                            OtelName::UriPath,
                        )
                        .await,
                    )
                }
            }))
        }
    });

    info!("starting HTTP server on {}", addr);

    let server = Server::bind(&addr).serve(make_service);

    // Run this server forever
    if let Err(e) = server.await {
        error!("server error: {}", e);
    }
}

/// Launch a separate Hyper HTTP API server thread and return its `JoinHandle`.
pub fn launch_http_server(port: u16, state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
    let state = Arc::clone(state);

    Ok(thread::Builder::new()
        .name("http-endpoint".into())
        .spawn(move || serve(port, state))?)
}
@@ -1,44 +0,0 @@
use std::ops::{Deref, DerefMut};

use axum::extract::{rejection::JsonRejection, FromRequest, Request};
use compute_api::responses::GenericAPIError;
use http::StatusCode;

/// Custom `Json` extractor, so that we can format errors into
/// `JsonResponse<GenericAPIError>`.
#[derive(Debug, Clone, Copy, Default)]
pub(crate) struct Json<T>(pub T);

impl<S, T> FromRequest<S> for Json<T>
where
    axum::Json<T>: FromRequest<S, Rejection = JsonRejection>,
    S: Send + Sync,
{
    type Rejection = (StatusCode, axum::Json<GenericAPIError>);

    async fn from_request(req: Request, state: &S) -> Result<Self, Self::Rejection> {
        match axum::Json::<T>::from_request(req, state).await {
            Ok(value) => Ok(Self(value.0)),
            Err(rejection) => Err((
                rejection.status(),
                axum::Json(GenericAPIError {
                    error: rejection.body_text().to_lowercase(),
                }),
            )),
        }
    }
}

impl<T> Deref for Json<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> DerefMut for Json<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
@@ -1,7 +0,0 @@
pub(crate) mod json;
pub(crate) mod path;
pub(crate) mod query;

pub(crate) use json::Json;
pub(crate) use path::Path;
pub(crate) use query::Query;
@@ -1,44 +0,0 @@
use std::ops::{Deref, DerefMut};

use axum::extract::{rejection::PathRejection, FromRequestParts};
use compute_api::responses::GenericAPIError;
use http::{request::Parts, StatusCode};

/// Custom `Path` extractor, so that we can format errors into
/// `JsonResponse<GenericAPIError>`.
#[derive(Debug, Clone, Copy, Default)]
pub(crate) struct Path<T>(pub T);

impl<S, T> FromRequestParts<S> for Path<T>
where
    axum::extract::Path<T>: FromRequestParts<S, Rejection = PathRejection>,
    S: Send + Sync,
{
    type Rejection = (StatusCode, axum::Json<GenericAPIError>);

    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
        match axum::extract::Path::<T>::from_request_parts(parts, state).await {
            Ok(value) => Ok(Self(value.0)),
            Err(rejection) => Err((
                rejection.status(),
                axum::Json(GenericAPIError {
                    error: rejection.body_text().to_ascii_lowercase(),
                }),
            )),
        }
    }
}

impl<T> Deref for Path<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> DerefMut for Path<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
@@ -1,44 +0,0 @@
use std::ops::{Deref, DerefMut};

use axum::extract::{rejection::QueryRejection, FromRequestParts};
use compute_api::responses::GenericAPIError;
use http::{request::Parts, StatusCode};

/// Custom `Query` extractor, so that we can format errors into
/// `JsonResponse<GenericAPIError>`.
#[derive(Debug, Clone, Copy, Default)]
pub(crate) struct Query<T>(pub T);

impl<S, T> FromRequestParts<S> for Query<T>
where
    axum::extract::Query<T>: FromRequestParts<S, Rejection = QueryRejection>,
    S: Send + Sync,
{
    type Rejection = (StatusCode, axum::Json<GenericAPIError>);

    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
        match axum::extract::Query::<T>::from_request_parts(parts, state).await {
            Ok(value) => Ok(Self(value.0)),
            Err(rejection) => Err((
                rejection.status(),
                axum::Json(GenericAPIError {
                    error: rejection.body_text().to_ascii_lowercase(),
                }),
            )),
        }
    }
}

impl<T> Deref for Query<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> DerefMut for Query<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
@@ -1,56 +1 @@
-use axum::{body::Body, response::Response};
-use compute_api::responses::{ComputeStatus, GenericAPIError};
-use http::{header::CONTENT_TYPE, StatusCode};
-use serde::Serialize;
-use tracing::error;
-
-pub use server::launch_http_server;
-
-mod extract;
-mod routes;
-mod server;
-
-/// Convenience response builder for JSON responses
-struct JsonResponse;
-
-impl JsonResponse {
-    /// Helper for actually creating a response
-    fn create_response(code: StatusCode, body: impl Serialize) -> Response {
-        Response::builder()
-            .status(code)
-            .header(CONTENT_TYPE.as_str(), "application/json")
-            .body(Body::from(serde_json::to_string(&body).unwrap()))
-            .unwrap()
-    }
-
-    /// Create a successful error response
-    pub(self) fn success(code: StatusCode, body: impl Serialize) -> Response {
-        assert!({
-            let code = code.as_u16();
-
-            (200..300).contains(&code)
-        });
-
-        Self::create_response(code, body)
-    }
-
-    /// Create an error response
-    pub(self) fn error(code: StatusCode, error: impl ToString) -> Response {
-        assert!(code.as_u16() >= 400);
-
-        let message = error.to_string();
-        error!(message);
-
-        Self::create_response(code, &GenericAPIError { error: message })
-    }
-
-    /// Create an error response related to the compute being in an invalid state
-    pub(self) fn invalid_status(status: ComputeStatus) -> Response {
-        Self::create_response(
-            StatusCode::PRECONDITION_FAILED,
-            &GenericAPIError {
-                error: format!("invalid compute status: {status}"),
-            },
-        )
-    }
-}
+pub mod api;
@@ -37,7 +37,7 @@ paths:
               schema:
                 $ref: "#/components/schemas/ComputeMetrics"
 
-  /metrics:
+  /metrics
     get:
       tags:
       - Info
@@ -68,6 +68,35 @@ paths:
               schema:
                 $ref: "#/components/schemas/ComputeInsights"
 
+  /installed_extensions:
+    get:
+      tags:
+      - Info
+      summary: Get installed extensions.
+      description: ""
+      operationId: getInstalledExtensions
+      responses:
+        200:
+          description: List of installed extensions
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/InstalledExtensions"
+  /info:
+    get:
+      tags:
+      - Info
+      summary: Get info about the compute pod / VM.
+      description: ""
+      operationId: getInfo
+      responses:
+        200:
+          description: Info
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Info"
+
   /dbs_and_roles:
     get:
       tags:
@@ -508,14 +537,12 @@ components:
       properties:
         extname:
           type: string
-        version:
-          type: string
+        versions:
+          type: array
           items:
             type: string
         n_databases:
          type: integer
-        owned_by_superuser:
-          type: integer
 
    SetRoleGrantsRequest:
      type: object
@@ -1,20 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use compute_api::responses::ComputeStatus;
use http::StatusCode;

use crate::{checker::check_writability, compute::ComputeNode, http::JsonResponse};

/// Check that the compute is currently running.
pub(in crate::http) async fn is_writable(State(compute): State<Arc<ComputeNode>>) -> Response {
    let status = compute.get_status();
    if status != ComputeStatus::Running {
        return JsonResponse::invalid_status(status);
    }

    match check_writability(&compute).await {
        Ok(_) => JsonResponse::success(StatusCode::OK, true),
        Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
    }
}
@@ -1,91 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use compute_api::{
    requests::ConfigurationRequest,
    responses::{ComputeStatus, ComputeStatusResponse},
};
use http::StatusCode;
use tokio::task;
use tracing::info;

use crate::{
    compute::{ComputeNode, ParsedSpec},
    http::{extract::Json, JsonResponse},
};

// Accept spec in JSON format and request compute configuration. If anything
// goes wrong after we set the compute status to `ConfigurationPending` and
// update compute state with new spec, we basically leave compute in the
// potentially wrong state. That said, it's control-plane's responsibility to
// watch compute state after reconfiguration request and to clean restart in
// case of errors.
pub(in crate::http) async fn configure(
    State(compute): State<Arc<ComputeNode>>,
    request: Json<ConfigurationRequest>,
) -> Response {
    if !compute.live_config_allowed {
        return JsonResponse::error(
            StatusCode::PRECONDITION_FAILED,
            "live configuration is not allowed for this compute node".to_string(),
        );
    }

    let pspec = match ParsedSpec::try_from(request.spec.clone()) {
        Ok(p) => p,
        Err(e) => return JsonResponse::error(StatusCode::BAD_REQUEST, e),
    };

    // XXX: wrap state update under lock in a code block. Otherwise, we will try
    // to `Send` `mut state` into the spawned thread bellow, which will cause
    // the following rustc error:
    //
    // error: future cannot be sent between threads safely
    {
        let mut state = compute.state.lock().unwrap();
        if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) {
            return JsonResponse::invalid_status(state.status);
        }

        state.pspec = Some(pspec);
        state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed);
        drop(state);
    }

    // Spawn a blocking thread to wait for compute to become Running. This is
    // needed to do not block the main pool of workers and be able to serve
    // other requests while some particular request is waiting for compute to
    // finish configuration.
    let c = compute.clone();
    let completed = task::spawn_blocking(move || {
        let mut state = c.state.lock().unwrap();
        while state.status != ComputeStatus::Running {
            state = c.state_changed.wait(state).unwrap();
            info!(
                "waiting for compute to become {}, current status: {}",
                ComputeStatus::Running,
                state.status
            );

            if state.status == ComputeStatus::Failed {
                let err = state.error.as_ref().map_or("unknown error", |x| x);
                let msg = format!("compute configuration failed: {:?}", err);
                return Err(msg);
            }
        }

        Ok(())
    })
    .await
    .unwrap();

    if let Err(e) = completed {
        return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e);
    }

    // Return current compute state if everything went well.
    let state = compute.state.lock().unwrap().clone();
    let body = ComputeStatusResponse::from(&state);

    JsonResponse::success(StatusCode::OK, body)
}
@@ -1,34 +0,0 @@
use std::sync::Arc;

use axum::{body::Body, extract::State, response::Response};
use http::{header::CONTENT_TYPE, StatusCode};
use serde::Deserialize;

use crate::{
    catalog::{get_database_schema, SchemaDumpError},
    compute::ComputeNode,
    http::{extract::Query, JsonResponse},
};

#[derive(Debug, Clone, Deserialize)]
pub(in crate::http) struct DatabaseSchemaParams {
    database: String,
}

/// Get a schema dump of the requested database.
pub(in crate::http) async fn get_schema_dump(
    params: Query<DatabaseSchemaParams>,
    State(compute): State<Arc<ComputeNode>>,
) -> Response {
    match get_database_schema(&compute, &params.database).await {
        Ok(schema) => Response::builder()
            .status(StatusCode::OK)
            .header(CONTENT_TYPE.as_str(), "application/json")
            .body(Body::from_stream(schema))
            .unwrap(),
        Err(SchemaDumpError::DatabaseDoesNotExist) => {
            JsonResponse::error(StatusCode::NOT_FOUND, SchemaDumpError::DatabaseDoesNotExist)
        }
        Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
    }
}
@@ -1,16 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use http::StatusCode;

use crate::{catalog::get_dbs_and_roles, compute::ComputeNode, http::JsonResponse};

/// Get the databases and roles from the compute.
pub(in crate::http) async fn get_catalog_objects(
    State(compute): State<Arc<ComputeNode>>,
) -> Response {
    match get_dbs_and_roles(&compute).await {
        Ok(catalog_objects) => JsonResponse::success(StatusCode::OK, catalog_objects),
        Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
    }
}
@@ -1,68 +0,0 @@
use std::sync::Arc;

use axum::{
    extract::State,
    response::{IntoResponse, Response},
};
use http::StatusCode;
use serde::Deserialize;

use crate::{
    compute::ComputeNode,
    http::{
        extract::{Path, Query},
        JsonResponse,
    },
};

#[derive(Debug, Clone, Deserialize)]
pub(in crate::http) struct ExtensionServerParams {
    #[serde(default)]
    is_library: bool,
}

/// Download a remote extension.
pub(in crate::http) async fn download_extension(
    Path(filename): Path<String>,
    params: Query<ExtensionServerParams>,
    State(compute): State<Arc<ComputeNode>>,
) -> Response {
    // Don't even try to download extensions if no remote storage is configured
    if compute.ext_remote_storage.is_none() {
        return JsonResponse::error(
            StatusCode::PRECONDITION_FAILED,
            "remote storage is not configured",
        );
    }

    let ext = {
        let state = compute.state.lock().unwrap();
        let pspec = state.pspec.as_ref().unwrap();
        let spec = &pspec.spec;

        let remote_extensions = match spec.remote_extensions.as_ref() {
            Some(r) => r,
            None => {
                return JsonResponse::error(
                    StatusCode::CONFLICT,
                    "information about remote extensions is unavailable",
                );
            }
        };

        remote_extensions.get_ext(
            &filename,
            params.is_library,
            &compute.build_tag,
            &compute.pgversion,
        )
    };

    match ext {
        Ok((ext_name, ext_path)) => match compute.download_extension(ext_name, ext_path).await {
            Ok(_) => StatusCode::OK.into_response(),
            Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
        },
        Err(e) => JsonResponse::error(StatusCode::NOT_FOUND, e),
    }
}
@@ -1,45 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use compute_api::{
    requests::ExtensionInstallRequest,
    responses::{ComputeStatus, ExtensionInstallResponse},
};
use http::StatusCode;

use crate::{
    compute::ComputeNode,
    http::{extract::Json, JsonResponse},
};

/// Install a extension.
pub(in crate::http) async fn install_extension(
    State(compute): State<Arc<ComputeNode>>,
    request: Json<ExtensionInstallRequest>,
) -> Response {
    let status = compute.get_status();
    if status != ComputeStatus::Running {
        return JsonResponse::invalid_status(status);
    }

    match compute
        .install_extension(
            &request.extension,
            &request.database,
            request.version.to_string(),
        )
        .await
    {
        Ok(version) => JsonResponse::success(
            StatusCode::CREATED,
            Some(ExtensionInstallResponse {
                extension: request.extension.clone(),
                version,
            }),
        ),
        Err(e) => JsonResponse::error(
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("failed to install extension: {e}"),
        ),
    }
}
@@ -1,35 +0,0 @@
use axum::response::{IntoResponse, Response};
use http::StatusCode;
use tracing::info;
use utils::failpoint_support::{apply_failpoint, ConfigureFailpointsRequest};

use crate::http::{extract::Json, JsonResponse};

/// Configure failpoints for testing purposes.
pub(in crate::http) async fn configure_failpoints(
    failpoints: Json<ConfigureFailpointsRequest>,
) -> Response {
    if !fail::has_failpoints() {
        return JsonResponse::error(
            StatusCode::PRECONDITION_FAILED,
            "Cannot manage failpoints because neon was compiled without failpoints support",
        );
    }

    for fp in &*failpoints {
        info!("cfg failpoint: {} {}", fp.name, fp.actions);

        // We recognize one extra "action" that's not natively recognized
        // by the failpoints crate: exit, to immediately kill the process
        let cfg_result = apply_failpoint(&fp.name, &fp.actions);

        if let Err(e) = cfg_result {
            return JsonResponse::error(
                StatusCode::BAD_REQUEST,
                format!("failed to configure failpoints: {e}"),
            );
        }
    }

    StatusCode::OK.into_response()
}
@@ -1,48 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use compute_api::{
    requests::SetRoleGrantsRequest,
    responses::{ComputeStatus, SetRoleGrantsResponse},
};
use http::StatusCode;

use crate::{
    compute::ComputeNode,
    http::{extract::Json, JsonResponse},
};

/// Add grants for a role.
pub(in crate::http) async fn add_grant(
    State(compute): State<Arc<ComputeNode>>,
    request: Json<SetRoleGrantsRequest>,
) -> Response {
    let status = compute.get_status();
    if status != ComputeStatus::Running {
        return JsonResponse::invalid_status(status);
    }

    match compute
        .set_role_grants(
            &request.database,
            &request.schema,
            &request.privileges,
            &request.role,
        )
        .await
    {
        Ok(()) => JsonResponse::success(
            StatusCode::CREATED,
            Some(SetRoleGrantsResponse {
                database: request.database.clone(),
                schema: request.schema.clone(),
                role: request.role.clone(),
                privileges: request.privileges.clone(),
            }),
        ),
        Err(e) => JsonResponse::error(
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("failed to grant role privileges to the schema: {e}"),
        ),
    }
}
@@ -1,18 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use compute_api::responses::ComputeStatus;
use http::StatusCode;

use crate::{compute::ComputeNode, http::JsonResponse};

/// Collect current Postgres usage insights.
pub(in crate::http) async fn get_insights(State(compute): State<Arc<ComputeNode>>) -> Response {
    let status = compute.get_status();
    if status != ComputeStatus::Running {
        return JsonResponse::invalid_status(status);
    }

    let insights = compute.collect_insights().await;
    JsonResponse::success(StatusCode::OK, insights)
}
@@ -1,31 +0,0 @@
use axum::{body::Body, response::Response};
use http::header::CONTENT_TYPE;
use http::StatusCode;
use metrics::proto::MetricFamily;
use metrics::{Encoder, TextEncoder};

use crate::{http::JsonResponse, metrics::collect};

/// Expose Prometheus metrics.
pub(in crate::http) async fn get_metrics() -> Response {
    // When we call TextEncoder::encode() below, it will immediately return an
    // error if a metric family has no metrics, so we need to preemptively
    // filter out metric families with no metrics.
    let metrics = collect()
        .into_iter()
        .filter(|m| !m.get_metric().is_empty())
        .collect::<Vec<MetricFamily>>();

    let encoder = TextEncoder::new();
    let mut buffer = vec![];

    if let Err(e) = encoder.encode(&metrics, &mut buffer) {
        return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e);
    }

    Response::builder()
        .status(StatusCode::OK)
        .header(CONTENT_TYPE, encoder.format_type())
        .body(Body::from(buffer))
        .unwrap()
}
@@ -1,12 +0,0 @@
use std::sync::Arc;

use axum::{extract::State, response::Response};
use http::StatusCode;

use crate::{compute::ComputeNode, http::JsonResponse};

/// Get startup metrics.
pub(in crate::http) async fn get_metrics(State(compute): State<Arc<ComputeNode>>) -> Response {
    let metrics = compute.state.lock().unwrap().metrics.clone();
    JsonResponse::success(StatusCode::OK, metrics)
}
@@ -1,36 +0,0 @@
use compute_api::responses::ComputeStatusResponse;

use crate::compute::ComputeState;

pub(in crate::http) mod check_writability;
pub(in crate::http) mod configure;
pub(in crate::http) mod database_schema;
pub(in crate::http) mod dbs_and_roles;
pub(in crate::http) mod extension_server;
pub(in crate::http) mod extensions;
pub(in crate::http) mod failpoints;
pub(in crate::http) mod grants;
pub(in crate::http) mod insights;
pub(in crate::http) mod metrics;
pub(in crate::http) mod metrics_json;
pub(in crate::http) mod status;
pub(in crate::http) mod terminate;

impl From<&ComputeState> for ComputeStatusResponse {
    fn from(state: &ComputeState) -> Self {
        ComputeStatusResponse {
            start_time: state.start_time,
            tenant: state
                .pspec
                .as_ref()
                .map(|pspec| pspec.tenant_id.to_string()),
            timeline: state
                .pspec
                .as_ref()
                .map(|pspec| pspec.timeline_id.to_string()),
            status: state.status,
            last_active: state.last_active,
            error: state.error.clone(),
        }
    }
}
@@ -1,14 +0,0 @@
use std::{ops::Deref, sync::Arc};

use axum::{extract::State, http::StatusCode, response::Response};
use compute_api::responses::ComputeStatusResponse;

use crate::{compute::ComputeNode, http::JsonResponse};

/// Retrieve the state of the comute.
pub(in crate::http) async fn get_status(State(compute): State<Arc<ComputeNode>>) -> Response {
    let state = compute.state.lock().unwrap();
    let body = ComputeStatusResponse::from(state.deref());

    JsonResponse::success(StatusCode::OK, body)
}
@@ -1,58 +0,0 @@
use std::sync::Arc;

use axum::{
    extract::State,
    response::{IntoResponse, Response},
};
use compute_api::responses::ComputeStatus;
use http::StatusCode;
use tokio::task;
use tracing::info;

use crate::{
    compute::{forward_termination_signal, ComputeNode},
    http::JsonResponse,
};

/// Terminate the compute.
pub(in crate::http) async fn terminate(State(compute): State<Arc<ComputeNode>>) -> Response {
    {
        let mut state = compute.state.lock().unwrap();
        if state.status == ComputeStatus::Terminated {
            return StatusCode::CREATED.into_response();
        }

        if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) {
            return JsonResponse::invalid_status(state.status);
        }

        state.set_status(ComputeStatus::TerminationPending, &compute.state_changed);
        drop(state);
    }

    forward_termination_signal();
    info!("sent signal and notified waiters");

    // Spawn a blocking thread to wait for compute to become Terminated.
    // This is needed to do not block the main pool of workers and
    // be able to serve other requests while some particular request
    // is waiting for compute to finish configuration.
    let c = compute.clone();
    task::spawn_blocking(move || {
        let mut state = c.state.lock().unwrap();
        while state.status != ComputeStatus::Terminated {
            state = c.state_changed.wait(state).unwrap();
            info!(
                "waiting for compute to become {}, current status: {:?}",
                ComputeStatus::Terminated,
                state.status
            );
        }
    })
    .await
    .unwrap();

    info!("terminated Postgres");

    StatusCode::OK.into_response()
}
@@ -1,149 +0,0 @@
use std::{
    net::{IpAddr, Ipv6Addr, SocketAddr},
    sync::Arc,
    thread,
    time::Duration,
};

use anyhow::Result;
use axum::{
    extract::Request,
    middleware::{self, Next},
    response::{IntoResponse, Response},
    routing::{get, post},
    Router,
};
use http::StatusCode;
use tokio::net::TcpListener;
use tower::ServiceBuilder;
use tower_http::{request_id::PropagateRequestIdLayer, trace::TraceLayer};
use tracing::{debug, error, info, Span};
use uuid::Uuid;

use super::routes::{
    check_writability, configure, database_schema, dbs_and_roles, extension_server, extensions,
    grants, insights, metrics, metrics_json, status, terminate,
};
use crate::compute::ComputeNode;

async fn handle_404() -> Response {
    StatusCode::NOT_FOUND.into_response()
}

const X_REQUEST_ID: &str = "x-request-id";

/// This middleware function allows compute_ctl to generate its own request ID
/// if one isn't supplied. The control plane will always send one as a UUID. The
/// neon Postgres extension on the other hand does not send one.
async fn maybe_add_request_id_header(mut request: Request, next: Next) -> Response {
    let headers = request.headers_mut();

    if headers.get(X_REQUEST_ID).is_none() {
        headers.append(X_REQUEST_ID, Uuid::new_v4().to_string().parse().unwrap());
    }

    next.run(request).await
}

/// Run the HTTP server and wait on it forever.
#[tokio::main]
async fn serve(port: u16, compute: Arc<ComputeNode>) {
    let mut app = Router::new()
        .route("/check_writability", post(check_writability::is_writable))
        .route("/configure", post(configure::configure))
        .route("/database_schema", get(database_schema::get_schema_dump))
        .route("/dbs_and_roles", get(dbs_and_roles::get_catalog_objects))
        .route(
            "/extension_server/{*filename}",
            post(extension_server::download_extension),
        )
        .route("/extensions", post(extensions::install_extension))
        .route("/grants", post(grants::add_grant))
        .route("/insights", get(insights::get_insights))
        .route("/metrics", get(metrics::get_metrics))
        .route("/metrics.json", get(metrics_json::get_metrics))
        .route("/status", get(status::get_status))
        .route("/terminate", post(terminate::terminate))
        .fallback(handle_404)
        .layer(
            ServiceBuilder::new()
                // Add this middleware since we assume the request ID exists
                .layer(middleware::from_fn(maybe_add_request_id_header))
                .layer(
                    TraceLayer::new_for_http()
                        .on_request(|request: &http::Request<_>, _span: &Span| {
                            let request_id = request
                                .headers()
                                .get(X_REQUEST_ID)
                                .unwrap()
                                .to_str()
                                .unwrap();

                            match request.uri().path() {
                                "/metrics" => {
                                    debug!(%request_id, "{} {}", request.method(), request.uri())
                                }
                                _ => info!(%request_id, "{} {}", request.method(), request.uri()),
                            };
                        })
                        .on_response(
                            |response: &http::Response<_>, latency: Duration, _span: &Span| {
                                let request_id = response
                                    .headers()
                                    .get(X_REQUEST_ID)
                                    .unwrap()
                                    .to_str()
                                    .unwrap();

                                info!(
                                    %request_id,
                                    code = response.status().as_u16(),
                                    latency = latency.as_millis()
                                )
                            },
                        ),
                )
                .layer(PropagateRequestIdLayer::x_request_id()),
        )
        .with_state(compute);

    // Add in any testing support
    if cfg!(feature = "testing") {
        use super::routes::failpoints;

        app = app.route("/failpoints", post(failpoints::configure_failpoints))
    }

    // This usually binds to both IPv4 and IPv6 on Linux, see
    // https://github.com/rust-lang/rust/pull/34440 for more information
    let addr = SocketAddr::new(IpAddr::from(Ipv6Addr::UNSPECIFIED), port);
    let listener = match TcpListener::bind(&addr).await {
        Ok(listener) => listener,
        Err(e) => {
            error!(
                "failed to bind the compute_ctl HTTP server to port {}: {}",
                port, e
            );
            return;
        }
    };

    if let Ok(local_addr) = listener.local_addr() {
        info!("compute_ctl HTTP server listening on {}", local_addr);
    } else {
        info!("compute_ctl HTTP server listening on port {}", port);
    }

    if let Err(e) = axum::serve(listener, app).await {
        error!("compute_ctl HTTP server error: {}", e);
    }
}

/// Launch a separate HTTP server thread and return its `JoinHandle`.
pub fn launch_http_server(port: u16, state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
    let state = Arc::clone(state);

    Ok(thread::Builder::new()
        .name("http-server".into())
        .spawn(move || serve(port, state))?)
}
@@ -1,10 +1,14 @@
 use compute_api::responses::{InstalledExtension, InstalledExtensions};
+use metrics::proto::MetricFamily;
 use std::collections::HashMap;
+use std::collections::HashSet;
 
 use anyhow::Result;
 use postgres::{Client, NoTls};
 
-use crate::metrics::INSTALLED_EXTENSIONS;
+use metrics::core::Collector;
+use metrics::{register_uint_gauge_vec, UIntGaugeVec};
+use once_cell::sync::Lazy;
 
 /// We don't reuse get_existing_dbs() just for code clarity
 /// and to make database listing query here more explicit.
@@ -34,68 +38,65 @@ fn list_dbs(client: &mut Client) -> Result<Vec<String>> {
 /// Connect to every database (see list_dbs above) and get the list of installed extensions.
 ///
 /// Same extension can be installed in multiple databases with different versions,
-/// so we report a separate metric (number of databases where it is installed)
-/// for each extension version.
+/// we only keep the highest and lowest version across all databases.
 pub fn get_installed_extensions(mut conf: postgres::config::Config) -> Result<InstalledExtensions> {
     conf.application_name("compute_ctl:get_installed_extensions");
     let mut client = conf.connect(NoTls)?;
 
     let databases: Vec<String> = list_dbs(&mut client)?;
 
-    let mut extensions_map: HashMap<(String, String, String), InstalledExtension> = HashMap::new();
+    let mut extensions_map: HashMap<String, InstalledExtension> = HashMap::new();
     for db in databases.iter() {
         conf.dbname(db);
         let mut db_client = conf.connect(NoTls)?;
-        let extensions: Vec<(String, String, i32)> = db_client
+        let extensions: Vec<(String, String)> = db_client
             .query(
-                "SELECT extname, extversion, extowner::integer FROM pg_catalog.pg_extension",
+                "SELECT extname, extversion FROM pg_catalog.pg_extension;",
                 &[],
             )?
             .iter()
-            .map(|row| {
-                (
-                    row.get("extname"),
-                    row.get("extversion"),
-                    row.get("extowner"),
-                )
-            })
+            .map(|row| (row.get("extname"), row.get("extversion")))
             .collect();
 
-        for (extname, v, extowner) in extensions.iter() {
+        for (extname, v) in extensions.iter() {
             let version = v.to_string();
 
-            // check if the extension is owned by superuser
-            // 10 is the oid of superuser
-            let owned_by_superuser = if *extowner == 10 { "1" } else { "0" };
+            // increment the number of databases where the version of extension is installed
+            INSTALLED_EXTENSIONS
+                .with_label_values(&[extname, &version])
+                .inc();
 
             extensions_map
-                .entry((
-                    extname.to_string(),
-                    version.clone(),
-                    owned_by_superuser.to_string(),
-                ))
+                .entry(extname.to_string())
                 .and_modify(|e| {
+                    e.versions.insert(version.clone());
                     // count the number of databases where the extension is installed
                     e.n_databases += 1;
                 })
                .or_insert(InstalledExtension {
                    extname: extname.to_string(),
-                    version: version.clone(),
+                    versions: HashSet::from([version.clone()]),
                    n_databases: 1,
-                    owned_by_superuser: owned_by_superuser.to_string(),
                });
        }
    }
 
-    for (key, ext) in extensions_map.iter() {
-        let (extname, version, owned_by_superuser) = key;
-        let n_databases = ext.n_databases as u64;
-
-        INSTALLED_EXTENSIONS
-            .with_label_values(&[extname, version, owned_by_superuser])
-            .set(n_databases);
-    }
-
-    Ok(InstalledExtensions {
+    let res = InstalledExtensions {
         extensions: extensions_map.into_values().collect(),
-    })
+    };
+
+    Ok(res)
+}
+
+static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
+    register_uint_gauge_vec!(
+        "compute_installed_extensions",
+        "Number of databases where the version of extension is installed",
+        &["extension_name", "version"]
+    )
+    .expect("failed to define a metric")
+});
+
+pub fn collect() -> Vec<MetricFamily> {
+    INSTALLED_EXTENSIONS.collect()
 }
@@ -3,6 +3,8 @@
 #![deny(unsafe_code)]
 #![deny(clippy::undocumented_unsafe_blocks)]
 
+extern crate hyper0 as hyper;
+
 pub mod checker;
 pub mod config;
 pub mod configurator;
@@ -16,7 +18,6 @@ pub mod extension_server;
 pub mod installed_extensions;
 pub mod local_proxy;
 pub mod lsn_lease;
-pub mod metrics;
 mod migration;
 pub mod monitor;
 pub mod params;
@@ -1,70 +0,0 @@
use metrics::core::Collector;
use metrics::proto::MetricFamily;
use metrics::{register_int_counter_vec, register_uint_gauge_vec, IntCounterVec, UIntGaugeVec};
use once_cell::sync::Lazy;

pub(crate) static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
    register_uint_gauge_vec!(
        "compute_installed_extensions",
        "Number of databases where the version of extension is installed",
        &["extension_name", "version", "owned_by_superuser"]
    )
    .expect("failed to define a metric")
});

// Normally, any HTTP API request is described by METHOD (e.g. GET, POST, etc.) + PATH,
// but for all our APIs we defined a 'slug'/method/operationId in the OpenAPI spec.
// And it's fair to call it a 'RPC' (Remote Procedure Call).
pub enum CPlaneRequestRPC {
    GetSpec,
}

impl CPlaneRequestRPC {
    pub fn as_str(&self) -> &str {
        match self {
            CPlaneRequestRPC::GetSpec => "GetSpec",
        }
    }
}

pub const UNKNOWN_HTTP_STATUS: &str = "unknown";

pub(crate) static CPLANE_REQUESTS_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "compute_ctl_cplane_requests_total",
        "Total number of control plane requests made by compute_ctl by status",
        &["rpc", "http_status"]
    )
    .expect("failed to define a metric")
});

/// Total number of failed database migrations. Per-compute, this is actually a boolean metric,
/// either empty or with a single value (1, migration_id) because we stop at the first failure.
/// Yet, the sum over the fleet will provide the total number of failures.
pub(crate) static DB_MIGRATION_FAILED: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "compute_ctl_db_migration_failed_total",
        "Total number of failed database migrations",
        &["migration_id"]
    )
    .expect("failed to define a metric")
});

pub(crate) static REMOTE_EXT_REQUESTS_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "compute_ctl_remote_ext_requests_total",
        "Total number of requests made by compute_ctl to download extensions from S3 proxy by status",
        // Do not use any labels like extension name yet.
        // We can add them later if needed.
        &["http_status"]
    )
    .expect("failed to define a metric")
});

pub fn collect() -> Vec<MetricFamily> {
    let mut metrics = INSTALLED_EXTENSIONS.collect();
    metrics.extend(CPLANE_REQUESTS_TOTAL.collect());
    metrics.extend(REMOTE_EXT_REQUESTS_TOTAL.collect());
    metrics.extend(DB_MIGRATION_FAILED.collect());
    metrics
}
@@ -1,18 +1,13 @@
 use anyhow::{Context, Result};
-use fail::fail_point;
-use postgres::{Client, Transaction};
-use tracing::{error, info};
-
-use crate::metrics::DB_MIGRATION_FAILED;
+use postgres::Client;
+use tracing::info;
 
-/// Runs a series of migrations on a target database
 pub(crate) struct MigrationRunner<'m> {
     client: &'m mut Client,
     migrations: &'m [&'m str],
 }
 
 impl<'m> MigrationRunner<'m> {
-    /// Create a new migration runner
     pub fn new(client: &'m mut Client, migrations: &'m [&'m str]) -> Self {
         // The neon_migration.migration_id::id column is a bigint, which is equivalent to an i64
         assert!(migrations.len() + 1 < i64::MAX as usize);
@@ -20,117 +15,86 @@ impl<'m> MigrationRunner<'m> {
         Self { client, migrations }
     }
 
-    /// Get the current value neon_migration.migration_id
     fn get_migration_id(&mut self) -> Result<i64> {
+        let query = "SELECT id FROM neon_migration.migration_id";
         let row = self
             .client
-            .query_one("SELECT id FROM neon_migration.migration_id", &[])?;
+            .query_one(query, &[])
+            .context("run_migrations get migration_id")?;
 
         Ok(row.get::<&str, i64>("id"))
     }
 
-    /// Update the neon_migration.migration_id value
-    ///
-    /// This function has a fail point called compute-migration, which can be
-    /// used if you would like to fail the application of a series of migrations
-    /// at some point.
-    fn update_migration_id(txn: &mut Transaction, migration_id: i64) -> Result<()> {
-        // We use this fail point in order to check that failing in the
-        // middle of applying a series of migrations fails in an expected
-        // manner
-        if cfg!(feature = "testing") {
-            let fail = (|| {
-                fail_point!("compute-migration", |fail_migration_id| {
-                    migration_id == fail_migration_id.unwrap().parse::<i64>().unwrap()
-                });
-
-                false
-            })();
-
-            if fail {
-                return Err(anyhow::anyhow!(format!(
-                    "migration {} was configured to fail because of a failpoint",
-                    migration_id
-                )));
-            }
-        }
-
-        txn.query(
-            "UPDATE neon_migration.migration_id SET id = $1",
-            &[&migration_id],
-        )
-        .with_context(|| format!("update neon_migration.migration_id to {migration_id}"))?;
+    fn update_migration_id(&mut self, migration_id: i64) -> Result<()> {
+        let setval = format!("UPDATE neon_migration.migration_id SET id={}", migration_id);
+
+        self.client
+            .simple_query(&setval)
+            .context("run_migrations update id")?;
 
         Ok(())
     }
 
-    /// Prepare the migrations the target database for handling migrations
-    fn prepare_database(&mut self) -> Result<()> {
-        self.client
-            .simple_query("CREATE SCHEMA IF NOT EXISTS neon_migration")?;
-        self.client.simple_query("CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)")?;
-        self.client.simple_query(
-            "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING",
-        )?;
-        self.client
-            .simple_query("ALTER SCHEMA neon_migration OWNER TO cloud_admin")?;
-        self.client
-            .simple_query("REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC")?;
+    fn prepare_migrations(&mut self) -> Result<()> {
+        let query = "CREATE SCHEMA IF NOT EXISTS neon_migration";
+        self.client.simple_query(query)?;
+
+        let query = "CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)";
+        self.client.simple_query(query)?;
+
+        let query = "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING";
+        self.client.simple_query(query)?;
+
+        let query = "ALTER SCHEMA neon_migration OWNER TO cloud_admin";
+        self.client.simple_query(query)?;
+
+        let query = "REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC";
+        self.client.simple_query(query)?;
 
         Ok(())
     }
 
-    /// Run an individual migration in a separate transaction block.
-    fn run_migration(client: &mut Client, migration_id: i64, migration: &str) -> Result<()> {
-        let mut txn = client
-            .transaction()
-            .with_context(|| format!("begin transaction for migration {migration_id}"))?;
-
-        if migration.starts_with("-- SKIP") {
-            info!("Skipping migration id={}", migration_id);
-
-            // Even though we are skipping the migration, updating the
-            // migration ID should help keep logic easy to understand when
-            // trying to understand the state of a cluster.
-            Self::update_migration_id(&mut txn, migration_id)?;
-        } else {
-            info!("Running migration id={}:\n{}\n", migration_id, migration);
-
-            txn.simple_query(migration)
-                .with_context(|| format!("apply migration {migration_id}"))?;
-
-            Self::update_migration_id(&mut txn, migration_id)?;
-        }
-
-        txn.commit()
-            .with_context(|| format!("commit transaction for migration {migration_id}"))?;
-
-        Ok(())
-    }
-
-    /// Run the configured set of migrations
     pub fn run_migrations(mut self) -> Result<()> {
-        self.prepare_database()
-            .context("prepare database to handle migrations")?;
+        self.prepare_migrations()?;
 
         let mut current_migration = self.get_migration_id()? as usize;
         while current_migration < self.migrations.len() {
-            // The index lags the migration ID by 1, so the current migration
-            // ID is also the next index
-            let migration_id = (current_migration + 1) as i64;
+            macro_rules! migration_id {
+                ($cm:expr) => {
+                    ($cm + 1) as i64
+                };
+            }
 
             let migration = self.migrations[current_migration];
 
-            match Self::run_migration(self.client, migration_id, migration) {
-                Ok(_) => {
-                    info!("Finished migration id={}", migration_id);
-                }
-                Err(e) => {
-                    error!("Failed to run migration id={}: {:?}", migration_id, e);
-                    DB_MIGRATION_FAILED
-                        .with_label_values(&[migration_id.to_string().as_str()])
-                        .inc();
-                    return Err(e);
-                }
+            if migration.starts_with("-- SKIP") {
+                info!("Skipping migration id={}", migration_id!(current_migration));
+            } else {
+                info!(
+                    "Running migration id={}:\n{}\n",
+                    migration_id!(current_migration),
+                    migration
+                );
+
+                self.client
+                    .simple_query("BEGIN")
+                    .context("begin migration")?;
+
+                self.client.simple_query(migration).with_context(|| {
+                    format!(
+                        "run_migrations migration id={}",
+                        migration_id!(current_migration)
+                    )
+                })?;
+
+                // Migration IDs start at 1
+                self.update_migration_id(migration_id!(current_migration))?;
+
+                self.client
+                    .simple_query("COMMIT")
+                    .context("commit migration")?;
+
+                info!("Finished migration id={}", migration_id!(current_migration));
             }
 
             current_migration += 1;
@@ -1,9 +0,0 @@
DO $$
DECLARE
    bypassrls boolean;
BEGIN
    SELECT rolbypassrls INTO bypassrls FROM pg_roles WHERE rolname = 'neon_superuser';
    IF NOT bypassrls THEN
        RAISE EXCEPTION 'neon_superuser cannot bypass RLS';
    END IF;
END $$;
@@ -1,25 +0,0 @@
DO $$
DECLARE
    role record;
BEGIN
    FOR role IN
        SELECT rolname AS name, rolinherit AS inherit
        FROM pg_roles
        WHERE pg_has_role(rolname, 'neon_superuser', 'member')
    LOOP
        IF NOT role.inherit THEN
            RAISE EXCEPTION '% cannot inherit', quote_ident(role.name);
        END IF;
    END LOOP;

    FOR role IN
        SELECT rolname AS name, rolbypassrls AS bypassrls
        FROM pg_roles
        WHERE NOT pg_has_role(rolname, 'neon_superuser', 'member')
        AND NOT starts_with(rolname, 'pg_')
    LOOP
        IF role.bypassrls THEN
            RAISE EXCEPTION '% can bypass RLS', quote_ident(role.name);
        END IF;
    END LOOP;
END $$;
@@ -1,10 +0,0 @@
-DO $$
-BEGIN
-    IF (SELECT current_setting('server_version_num')::numeric < 160000) THEN
-        RETURN;
-    END IF;
-
-    IF NOT (SELECT pg_has_role('neon_superuser', 'pg_create_subscription', 'member')) THEN
-        RAISE EXCEPTION 'neon_superuser cannot execute pg_create_subscription';
-    END IF;
-END $$;
@@ -1,19 +0,0 @@
-DO $$
-DECLARE
-    monitor record;
-BEGIN
-    SELECT pg_has_role('neon_superuser', 'pg_monitor', 'member') AS member,
-           admin_option AS admin
-        INTO monitor
-        FROM pg_auth_members
-        WHERE roleid = 'pg_monitor'::regrole
-            AND member = 'pg_monitor'::regrole;
-
-    IF NOT monitor.member THEN
-        RAISE EXCEPTION 'neon_superuser is not a member of pg_monitor';
-    END IF;
-
-    IF NOT monitor.admin THEN
-        RAISE EXCEPTION 'neon_superuser cannot grant pg_monitor';
-    END IF;
-END $$;
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,2 +0,0 @@
--- This test was never written becuase at the time migration tests were added
--- the accompanying migration was already skipped.
@@ -1,13 +0,0 @@
-DO $$
-DECLARE
-    can_execute boolean;
-BEGIN
-    SELECT bool_and(has_function_privilege('neon_superuser', oid, 'execute'))
-        INTO can_execute
-        FROM pg_proc
-        WHERE proname IN ('pg_export_snapshot', 'pg_log_standby_snapshot')
-            AND pronamespace = 'pg_catalog'::regnamespace;
-    IF NOT can_execute THEN
-        RAISE EXCEPTION 'neon_superuser cannot execute both pg_export_snapshot and pg_log_standby_snapshot';
-    END IF;
-END $$;
@@ -1,13 +0,0 @@
-DO $$
-DECLARE
-    can_execute boolean;
-BEGIN
-    SELECT has_function_privilege('neon_superuser', oid, 'execute')
-        INTO can_execute
-        FROM pg_proc
-        WHERE proname = 'pg_show_replication_origin_status'
-            AND pronamespace = 'pg_catalog'::regnamespace;
-    IF NOT can_execute THEN
-        RAISE EXCEPTION 'neon_superuser cannot execute pg_show_replication_origin_status';
-    END IF;
-END $$;
@@ -6,7 +6,6 @@ use std::path::Path;
 use tracing::{error, info, instrument, warn};

 use crate::config;
-use crate::metrics::{CPlaneRequestRPC, CPLANE_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS};
 use crate::migration::MigrationRunner;
 use crate::params::PG_HBA_ALL_MD5;
 use crate::pg_helpers::*;
@@ -20,7 +19,7 @@ use compute_api::spec::ComputeSpec;
 fn do_control_plane_request(
     uri: &str,
     jwt: &str,
-) -> Result<ControlPlaneSpecResponse, (bool, String, String)> {
+) -> Result<ControlPlaneSpecResponse, (bool, String)> {
     let resp = reqwest::blocking::Client::new()
         .get(uri)
         .header("Authorization", format!("Bearer {}", jwt))
@@ -28,42 +27,35 @@ fn do_control_plane_request(
         .map_err(|e| {
             (
                 true,
-                format!("could not perform spec request to control plane: {:?}", e),
-                UNKNOWN_HTTP_STATUS.to_string(),
+                format!("could not perform spec request to control plane: {}", e),
             )
         })?;

-    let status = resp.status();
-    match status {
+    match resp.status() {
         StatusCode::OK => match resp.json::<ControlPlaneSpecResponse>() {
             Ok(spec_resp) => Ok(spec_resp),
             Err(e) => Err((
                 true,
-                format!("could not deserialize control plane response: {:?}", e),
-                status.to_string(),
+                format!("could not deserialize control plane response: {}", e),
             )),
         },
-        StatusCode::SERVICE_UNAVAILABLE => Err((
-            true,
-            "control plane is temporarily unavailable".to_string(),
-            status.to_string(),
-        )),
+        StatusCode::SERVICE_UNAVAILABLE => {
+            Err((true, "control plane is temporarily unavailable".to_string()))
+        }
         StatusCode::BAD_GATEWAY => {
             // We have a problem with intermittent 502 errors now
             // https://github.com/neondatabase/cloud/issues/2353
             // It's fine to retry GET request in this case.
-            Err((
-                true,
-                "control plane request failed with 502".to_string(),
-                status.to_string(),
-            ))
+            Err((true, "control plane request failed with 502".to_string()))
         }
         // Another code, likely 500 or 404, means that compute is unknown to the control plane
         // or some internal failure happened. Doesn't make much sense to retry in this case.
         _ => Err((
             false,
-            format!("unexpected control plane response status code: {}", status),
-            status.to_string(),
+            format!(
+                "unexpected control plane response status code: {}",
+                resp.status()
+            ),
         )),
     }
 }
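The error type threaded through do_control_plane_request is a tuple whose first element tells the caller whether the request may be retried; the left-hand side of the diff adds a third element carrying the HTTP status so it can serve as a metrics label, while the right-hand side keeps the two-element form. Below is a hedged, standalone Rust sketch of the two-element convention; the classify function and its status handling are invented for illustration and only mirror the arms visible above.

// Sketch only: (retryable, message), as in the right-hand side of the hunk.
fn classify(status: u16) -> Result<(), (bool, String)> {
    match status {
        200 => Ok(()),
        503 => Err((true, "control plane is temporarily unavailable".to_string())),
        502 => Err((true, "control plane request failed with 502".to_string())),
        other => Err((
            false,
            format!("unexpected control plane response status code: {other}"),
        )),
    }
}

fn main() {
    for status in [200u16, 503, 502, 404] {
        match classify(status) {
            Ok(()) => println!("{status}: ok"),
            Err((retry, msg)) => println!("{status}: retryable={retry}, {msg}"),
        }
    }
}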
@@ -91,28 +83,17 @@ pub fn get_spec_from_control_plane(
     // - got spec -> return Ok(Some(spec))
     while attempt < 4 {
         spec = match do_control_plane_request(&cp_uri, &jwt) {
-            Ok(spec_resp) => {
-                CPLANE_REQUESTS_TOTAL
-                    .with_label_values(&[
-                        CPlaneRequestRPC::GetSpec.as_str(),
-                        &StatusCode::OK.to_string(),
-                    ])
-                    .inc();
-                match spec_resp.status {
-                    ControlPlaneComputeStatus::Empty => Ok(None),
-                    ControlPlaneComputeStatus::Attached => {
-                        if let Some(spec) = spec_resp.spec {
-                            Ok(Some(spec))
-                        } else {
-                            bail!("compute is attached, but spec is empty")
-                        }
+            Ok(spec_resp) => match spec_resp.status {
+                ControlPlaneComputeStatus::Empty => Ok(None),
+                ControlPlaneComputeStatus::Attached => {
+                    if let Some(spec) = spec_resp.spec {
+                        Ok(Some(spec))
+                    } else {
+                        bail!("compute is attached, but spec is empty")
                     }
                 }
-            }
-            Err((retry, msg, status)) => {
-                CPLANE_REQUESTS_TOTAL
-                    .with_label_values(&[CPlaneRequestRPC::GetSpec.as_str(), &status])
-                    .inc();
+            },
+            Err((retry, msg)) => {
                 if retry {
                     Err(anyhow!(msg))
                 } else {
@@ -47,7 +47,6 @@ pub enum PerDatabasePhase
     DeleteDBRoleReferences,
     ChangeSchemaPerms,
     HandleAnonExtension,
-    DropLogicalSubscriptions,
 }

 #[derive(Clone, Debug)]
@@ -58,13 +57,11 @@ pub enum ApplySpecPhase
     CreateAndAlterRoles,
     RenameAndDeleteDatabases,
     CreateAndAlterDatabases,
-    CreateSchemaNeon,
     RunInEachDatabase { db: DB, subphase: PerDatabasePhase },
     HandleOtherExtensions,
     HandleNeonExtension,
     CreateAvailabilityCheck,
     DropRoles,
-    FinalizeDropLogicalSubscriptions,
 }

 pub struct Operation {
@@ -77,7 +74,7 @@ pub struct MutableApplyContext
     pub dbs: HashMap<String, Database>,
 }

-/// Apply the operations that belong to the given spec apply phase.
+/// Appply the operations that belong to the given spec apply phase.
 ///
 /// Commands within a single phase are executed in order of Iterator yield.
 /// Commands of ApplySpecPhase::RunInEachDatabase will execute in the database
@@ -329,12 +326,13 @@ async fn get_operations<'a>(

         // Use FORCE to drop database even if there are active connections.
         // We run this from `cloud_admin`, so it should have enough privileges.
-        //
         // NB: there could be other db states, which prevent us from dropping
         // the database. For example, if db is used by any active subscription
         // or replication slot.
-        // Such cases are handled in the DropLogicalSubscriptions
-        // phase. We do all the cleanup before actually dropping the database.
+        // TODO: deal with it once we allow logical replication. Proper fix should
+        // involve returning an error code to the control plane, so it could
+        // figure out that this is a non-retryable error, return it to the user
+        // and fail operation permanently.
         let drop_db_query: String = format!(
             "DROP DATABASE IF EXISTS {} WITH (FORCE)",
             &op.name.pg_quote()
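The drop query above splices a quoted database name into DROP DATABASE IF EXISTS ... WITH (FORCE); pg_quote is the crate's own helper and is not shown in this diff. The sketch below uses a simplified stand-in for identifier quoting, purely to illustrate why the name is quoted before being formatted into the statement.

// Simplified stand-in for pg_quote: double-quote the identifier and escape
// embedded double quotes. Not the crate's actual implementation.
fn quote_ident(name: &str) -> String {
    format!("\"{}\"", name.replace('"', "\"\""))
}

fn main() {
    // Hypothetical database name containing characters that need quoting.
    let db_name = "tenant \"42\" db";
    let drop_db_query = format!(
        "DROP DATABASE IF EXISTS {} WITH (FORCE)",
        quote_ident(db_name)
    );
    println!("{drop_db_query}");
}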
@@ -444,38 +442,8 @@ async fn get_operations<'a>(

             Ok(Box::new(operations))
         }
-        ApplySpecPhase::CreateSchemaNeon => Ok(Box::new(once(Operation {
-            query: String::from("CREATE SCHEMA IF NOT EXISTS neon"),
-            comment: Some(String::from(
-                "create schema for neon extension and utils tables",
-            )),
-        }))),
         ApplySpecPhase::RunInEachDatabase { db, subphase } => {
             match subphase {
-                PerDatabasePhase::DropLogicalSubscriptions => {
-                    match &db {
-                        DB::UserDB(db) => {
-                            let drop_subscription_query: String = format!(
-                                include_str!("sql/drop_subscriptions.sql"),
-                                datname_str = escape_literal(&db.name),
-                            );
-
-                            let operations = vec![Operation {
-                                query: drop_subscription_query,
-                                comment: Some(format!(
-                                    "optionally dropping subscriptions for DB {}",
-                                    db.name,
-                                )),
-                            }]
-                            .into_iter();
-
-                            Ok(Box::new(operations))
-                        }
-                        // skip this cleanup for the system databases
-                        // because users can't drop them
-                        DB::SystemDB => Ok(Box::new(empty())),
-                    }
-                }
                 PerDatabasePhase::DeleteDBRoleReferences => {
                     let ctx = ctx.read().await;

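The removed DropLogicalSubscriptions arm shows a templating pattern worth noting: a SQL file is pulled in with include_str! and its named placeholder ({datname_str}) is filled through format!, with the database name escaped as a literal first. Below is a hedged sketch of that pattern; the inline query string stands in for the real sql/drop_subscriptions.sql, and escape_literal is a simplified stand-in for the crate's helper.

// Simplified literal escaping: single-quote the value and double embedded quotes.
fn escape_literal(s: &str) -> String {
    format!("'{}'", s.replace('\'', "''"))
}

fn main() {
    // In the removed code this template comes from
    // include_str!("sql/drop_subscriptions.sql"); here it is inlined.
    let drop_subscription_query = format!(
        "SELECT subname FROM pg_subscription WHERE subdbid = (SELECT oid FROM pg_database WHERE datname = {datname_str});",
        datname_str = escape_literal("my_db")
    );
    println!("{drop_subscription_query}");
}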
@@ -506,19 +474,7 @@ async fn get_operations<'a>(
                         ),
                         comment: None,
                     },
-                    // Revoke some potentially blocking privileges (Neon-specific currently)
-                    Operation {
-                        query: format!(
-                            include_str!("sql/pre_drop_role_revoke_privileges.sql"),
-                            role_name = quoted,
-                        ),
-                        comment: None,
-                    },
                     // This now will only drop privileges of the role
-                    // TODO: this is obviously not 100% true because of the above case,
-                    // there could be still some privileges that are not revoked. Maybe this
-                    // only drops privileges that were granted *by this* role, not *to this* role,
-                    // but this has to be checked.
                     Operation {
                         query: format!("DROP OWNED BY {}", quoted),
                         comment: None,
@@ -674,6 +630,10 @@ async fn get_operations<'a>(
         }
         ApplySpecPhase::HandleNeonExtension => {
             let operations = vec![
+                Operation {
+                    query: String::from("CREATE SCHEMA IF NOT EXISTS neon"),
+                    comment: Some(String::from("init: add schema for extension")),
+                },
                 Operation {
                     query: String::from("CREATE EXTENSION IF NOT EXISTS neon WITH SCHEMA neon"),
                     comment: Some(String::from(
@@ -716,9 +676,5 @@ async fn get_operations<'a>(

             Ok(Box::new(operations))
         }
-        ApplySpecPhase::FinalizeDropLogicalSubscriptions => Ok(Box::new(once(Operation {
-            query: String::from(include_str!("sql/finalize_drop_subscriptions.sql")),
-            comment: None,
-        }))),
     }
 }
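The Operation added in the HandleNeonExtension arm creates the neon schema before the following operation creates the extension into it, so the order of the vector matters. A small sketch with a simplified stand-in for the crate's Operation struct, printing the statements in the order they would run:

// Simplified stand-in for the crate's Operation struct; illustration only.
struct Operation {
    query: String,
    comment: Option<String>,
}

fn main() {
    let operations = vec![
        Operation {
            query: String::from("CREATE SCHEMA IF NOT EXISTS neon"),
            comment: Some(String::from("init: add schema for extension")),
        },
        Operation {
            query: String::from("CREATE EXTENSION IF NOT EXISTS neon WITH SCHEMA neon"),
            comment: None,
        },
    ];

    // The schema statement must run first; the extension is installed into it.
    for op in &operations {
        if let Some(comment) = &op.comment {
            println!("-- {comment}");
        }
        println!("{};", op.query);
    }
}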
@@ -1,11 +0,0 @@
-DO $$
-DECLARE
-    subname TEXT;
-BEGIN
-    FOR subname IN SELECT pg_subscription.subname FROM pg_subscription WHERE subdbid = (SELECT oid FROM pg_database WHERE datname = {datname_str}) LOOP
-        EXECUTE format('ALTER SUBSCRIPTION %I DISABLE;', subname);
-        EXECUTE format('ALTER SUBSCRIPTION %I SET (slot_name = NONE);', subname);
-        EXECUTE format('DROP SUBSCRIPTION %I;', subname);
-    END LOOP;
-END;
-$$;
@@ -1,21 +0,0 @@
-DO $$
-BEGIN
-    IF NOT EXISTS(
-        SELECT 1
-        FROM pg_catalog.pg_tables
-        WHERE tablename = 'drop_subscriptions_done'
-            AND schemaname = 'neon'
-    )
-    THEN
-        CREATE TABLE neon.drop_subscriptions_done
-            (id serial primary key, timeline_id text);
-    END IF;
-
-    -- preserve the timeline_id of the last drop_subscriptions run
-    -- to ensure that the cleanup of a timeline is executed only once.
-    -- use upsert to avoid the table bloat in case of cascade branching (branch of a branch)
-    INSERT INTO neon.drop_subscriptions_done VALUES (1, current_setting('neon.timeline_id'))
-    ON CONFLICT (id) DO UPDATE
-    SET timeline_id = current_setting('neon.timeline_id');
-END
-$$
@@ -1,28 +0,0 @@
-SET SESSION ROLE neon_superuser;
-
-DO $$
-DECLARE
-    schema TEXT;
-    revoke_query TEXT;
-BEGIN
-    FOR schema IN
-        SELECT schema_name
-        FROM information_schema.schemata
-        -- So far, we only had issues with 'public' schema. Probably, because we do some additional grants,
-        -- e.g., make DB owner the owner of 'public' schema automatically (when created via API).
-        -- See https://github.com/neondatabase/cloud/issues/13582 for the context.
-        -- Still, keep the loop because i) it efficiently handles the case when there is no 'public' schema,
-        -- ii) it's easy to add more schemas to the list if needed.
-        WHERE schema_name IN ('public')
-    LOOP
-        revoke_query := format(
-            'REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %I FROM {role_name} GRANTED BY neon_superuser;',
-            schema
-        );
-
-        EXECUTE revoke_query;
-    END LOOP;
-END;
-$$;
-
-RESET ROLE;
Some files were not shown because too many files have changed in this diff.