## Problem

We use infrastructure as code (Terraform) to deploy AWS Aurora and AWS RDS Postgres database clusters. Whenever there is a change in Terraform (e.g. **every year** to upgrade to a higher Postgres version, or when we change the cluster configuration), Terraform applies the change and creates a new AWS database cluster. However, our benchmarking test cases also expect databases in these clusters, with tables loaded with data. So we add auto-detection: if the AWS RDS instances are "empty", we create the necessary databases and restore a pg_dump.

**Important notes:**

- These steps are NOT run in each benchmarking run, but only after a new RDS instance has been deployed.
- The benchmarking workflows use GitHub secrets to find the connection string for the database. These secrets still need to be updated (manually or programmatically using the GitHub CLI) if some part of the connection string (e.g. user, password or hostname) changes.

## Summary of changes

In each benchmarking run, check whether

- the database has already been created - if not, create it
- the database has already been restored - if not, restore it

Supported databases:

- tpch
- clickbench
- user example

Supported platforms:

- AWS RDS Postgres
- AWS Aurora Serverless Postgres

Sample workflow run (this one uses a Neon database to test the restore step, not real AWS databases):
https://github.com/neondatabase/neon/actions/runs/10321441086/job/28574350581

Sample workflow run with real AWS database clusters:
https://github.com/neondatabase/neon/actions/runs/10346816389/job/28635997653

Verification in a second run with real AWS database clusters that the restore is skipped the second time:
https://github.com/neondatabase/neon/actions/runs/10348469517/job/28640778223
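The check-then-create/restore logic lives in the reusable workflow `.github/workflows/_benchmarking_preparation.yml` that the benchmarking jobs depend on. As a rough illustration of the idea (not the actual implementation; the connection-string variables and the dump file name are placeholders), the idempotent preparation boils down to something like:

```bash
# Minimal sketch of the "create if missing, restore if empty" check.
# ADMIN_CONNSTR, DB_CONNSTR and tpch.dump are illustrative placeholders.
DATABASE=tpch

# Create the database only if it does not exist yet.
if ! psql "${ADMIN_CONNSTR}" -tAc "SELECT 1 FROM pg_database WHERE datname = '${DATABASE}'" | grep -q 1; then
  psql "${ADMIN_CONNSTR}" -c "CREATE DATABASE ${DATABASE}"
fi

# Restore the dump only if the database has no user tables yet,
# so reruns against an already-prepared cluster skip the restore.
TABLES=$(psql "${DB_CONNSTR}" -tAc \
  "SELECT count(*) FROM pg_catalog.pg_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema')")
if [ "${TABLES}" -eq 0 ]; then
  pg_restore --no-owner --no-privileges --dbname "${DB_CONNSTR}" tpch.dump
fi
```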
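If a cluster is recreated and its endpoint or credentials change, the corresponding GitHub secret has to be rotated as well. One way to do this programmatically is the GitHub CLI; the secret name matches the one used in the workflow below, while the connection string value is only an example:

```bash
# Update the stored connection string for the RDS Postgres benchmark target.
# The value shown is a placeholder; use the real hostname, user and password.
gh secret set BENCHMARK_RDS_POSTGRES_CONNSTR \
  --repo neondatabase/neon \
  --body "postgresql://USER:PASSWORD@NEW-RDS-HOSTNAME:5432/DBNAME"
```

The benchmarking workflow itself follows.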
name: Benchmarking

on:
  # uncomment to run on push for debugging your PR
  # push:
  #  branches: [ your branch ]
  schedule:
    # * is a special character in YAML so you have to quote this string
    # ┌───────────── minute (0 - 59)
    # │ ┌───────────── hour (0 - 23)
    # │ │ ┌───────────── day of the month (1 - 31)
    # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
    # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
    - cron: '0 3 * * *' # run once a day, timezone is utc

  workflow_dispatch: # adds ability to run this manually
    inputs:
      region_id:
        description: 'Project region id. If not set, the default region will be used'
        required: false
        default: 'aws-us-east-2'
      save_perf_report:
        type: boolean
        description: 'Publish perf report. If not set, the report will be published only for the main branch'
        required: false
      collect_olap_explain:
        type: boolean
        description: 'Collect EXPLAIN ANALYZE for OLAP queries. If not set, EXPLAIN ANALYZE will not be collected'
        required: false
        default: false
      collect_pg_stat_statements:
        type: boolean
        description: 'Collect pg_stat_statements for OLAP queries. If not set, pg_stat_statements will not be collected'
        required: false
        default: false
      run_AWS_RDS_AND_AURORA:
        type: boolean
        description: 'AWS-RDS and AWS-AURORA normally only run on Saturday. Set this to true to run them on every workflow_dispatch'
        required: false
        default: false
      run_only_pgvector_tests:
        type: boolean
        description: 'Run pgvector tests but no other tests. If not set, all tests including pgvector tests will be run'
        required: false
        default: false

defaults:
  run:
    shell: bash -euxo pipefail {0}

concurrency:
  # Allow only one workflow per any non-`main` branch.
  group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
  cancel-in-progress: true
jobs:
  bench:
    if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }}
    permissions:
      contents: write
      statuses: write
      id-token: write # Required for OIDC authentication in azure runners
    strategy:
      fail-fast: false
      matrix:
        include:
          - DEFAULT_PG_VERSION: 16
            PLATFORM: "neon-staging"
            region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
            RUNNER: [ self-hosted, us-east-2, x64 ]
            IMAGE: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned
          - DEFAULT_PG_VERSION: 16
            PLATFORM: "azure-staging"
            region_id: 'azure-eastus2'
            RUNNER: [ self-hosted, eastus2, x64 ]
            IMAGE: neondatabase/build-tools:pinned
    env:
      TEST_PG_BENCH_DURATIONS_MATRIX: "300"
      TEST_PG_BENCH_SCALES_MATRIX: "10,100"
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: ${{ matrix.DEFAULT_PG_VERSION }}
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.PLATFORM }}

    runs-on: ${{ matrix.RUNNER }}
    container:
      image: ${{ matrix.IMAGE }}
      options: --init

    steps:
      - uses: actions/checkout@v4

      - name: Configure AWS credentials # necessary on Azure runners
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: eu-central-1
          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
          role-duration-seconds: 18000 # 5 hours

      - name: Download Neon artifact
        uses: ./.github/actions/download
        with:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest

      - name: Create Neon Project
        id: create-neon-project
        uses: ./.github/actions/neon-project-create
        with:
          region_id: ${{ matrix.region_id }}
          postgres_version: ${{ env.DEFAULT_PG_VERSION }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}

      - name: Run benchmark
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
          # Set --sparse-ordering option of pytest-order plugin
          # to ensure tests are running in the order they appear in the file.
          # It's important for test_perf_pgbench.py::test_pgbench_remote_* tests
          extra_params:
            -m remote_cluster
            --sparse-ordering
            --timeout 14400
            --ignore test_runner/performance/test_perf_olap.py
            --ignore test_runner/performance/test_perf_pgvector_queries.py
            --ignore test_runner/performance/test_logical_replication.py
            --ignore test_runner/performance/test_physical_replication.py
        env:
          BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

      - name: Delete Neon Project
        if: ${{ always() }}
        uses: ./.github/actions/neon-project-delete
        with:
          project_id: ${{ steps.create-neon-project.outputs.project_id }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}

      - name: Create Allure report
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate

      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D" # dev-staging-stream
          slack-message: "Periodic perf testing: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
  replication-tests:
    if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }}
    env:
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: 16
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: "neon-staging"

    runs-on: [ self-hosted, us-east-2, x64 ]
    container:
      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned
      options: --init

    steps:
      - uses: actions/checkout@v4

      - name: Download Neon artifact
        uses: ./.github/actions/download
        with:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest

      - name: Run Logical Replication benchmarks
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance/test_logical_replication.py
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 5400
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
          NEON_API_KEY: ${{ secrets.NEON_STAGING_API_KEY }}
          BENCHMARK_PROJECT_ID_PUB: ${{ vars.BENCHMARK_PROJECT_ID_PUB }}
          BENCHMARK_PROJECT_ID_SUB: ${{ vars.BENCHMARK_PROJECT_ID_SUB }}

      - name: Run Physical Replication benchmarks
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance/test_physical_replication.py
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 5400
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
          NEON_API_KEY: ${{ secrets.NEON_STAGING_API_KEY }}

      - name: Create Allure report
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate

      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D" # dev-staging-stream
          slack-message: "Periodic replication testing: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
  generate-matrices:
    if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }}
    # Create matrices for the benchmarking jobs, so we run benchmarks on rds only once a week (on Saturday)
    #
    # Available platforms:
    # - neonvm-captest-new: Freshly created project (1 CU)
    # - neonvm-captest-freetier: Use freetier-sized compute (0.25 CU)
    # - neonvm-captest-azure-new: Freshly created project (1 CU) in azure region
    # - neonvm-captest-azure-freetier: Use freetier-sized compute (0.25 CU) in azure region
    # - neonvm-captest-reuse: Reusing existing project
    # - rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
    # - rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
    env:
      RUN_AWS_RDS_AND_AURORA: ${{ github.event.inputs.run_AWS_RDS_AND_AURORA || 'false' }}
      DEFAULT_REGION_ID: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
    runs-on: ubuntu-22.04
    outputs:
      pgbench-compare-matrix: ${{ steps.pgbench-compare-matrix.outputs.matrix }}
      olap-compare-matrix: ${{ steps.olap-compare-matrix.outputs.matrix }}
      tpch-compare-matrix: ${{ steps.tpch-compare-matrix.outputs.matrix }}

    steps:
      - name: Generate matrix for pgbench benchmark
        id: pgbench-compare-matrix
        run: |
          region_id_default=${{ env.DEFAULT_REGION_ID }}
          runner_default='["self-hosted", "us-east-2", "x64"]'
          runner_azure='["self-hosted", "eastus2", "x64"]'
          image_default="369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned"
          matrix='{
            "pg_version" : [
              16
            ],
            "region_id" : [
              "'"$region_id_default"'"
            ],
            "platform": [
              "neonvm-captest-new",
              "neonvm-captest-reuse",
              "neonvm-captest-new"
            ],
            "db_size": [ "10gb" ],
            "runner": ['"$runner_default"'],
            "image": [ "'"$image_default"'" ],
            "include": [{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_default"', "image": "'"$image_default"'" },
                        { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
                        { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
                        { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned" },
                        { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-new", "db_size": "10gb","runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned" },
                        { "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-new", "db_size": "50gb","runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned" },
                        { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-sharding-reuse", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" }]
          }'

          if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
            matrix=$(echo "$matrix" | jq '.include += [{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "rds-postgres", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
                                                       { "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "rds-aurora", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" }]')
          fi

          echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT

      - name: Generate matrix for OLAP benchmarks
        id: olap-compare-matrix
        run: |
          matrix='{
            "platform": [
              "neonvm-captest-reuse"
            ]
          }'

          if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
            matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres" },
                                                       { "platform": "rds-aurora" }]')
          fi

          echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT

      - name: Generate matrix for TPC-H benchmarks
        id: tpch-compare-matrix
        run: |
          matrix='{
            "platform": [
              "neonvm-captest-reuse"
            ],
            "scale": [
              "10"
            ]
          }'

          if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
            matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "scale": "10" },
                                                       { "platform": "rds-aurora", "scale": "10" }]')
          fi

          echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
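  # Make sure the AWS RDS and Aurora clusters contain the databases the OLAP,
  # TPC-H, and user-example jobs below expect (tpch, clickbench, user examples):
  # the reusable workflow creates each database and restores its pg_dump backup
  # only if it is not present yet, so reruns against prepared clusters skip it.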
  prepare_AWS_RDS_databases:
    uses: ./.github/workflows/_benchmarking_preparation.yml
    secrets: inherit
  pgbench-compare:
    if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }}
    needs: [ generate-matrices, prepare_AWS_RDS_databases ]
    permissions:
      contents: write
      statuses: write
      id-token: write # Required for OIDC authentication in azure runners

    strategy:
      fail-fast: false
      matrix: ${{fromJson(needs.generate-matrices.outputs.pgbench-compare-matrix)}}

    env:
      TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
      TEST_PG_BENCH_SCALES_MATRIX: ${{ matrix.db_size }}
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: ${{ matrix.pg_version }}
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.platform }}

    runs-on: ${{ matrix.runner }}
    container:
      image: ${{ matrix.image }}
      options: --init

    # Increase timeout to 8h, default timeout is 6h
    timeout-minutes: 480

    steps:
      - uses: actions/checkout@v4

      - name: Configure AWS credentials # necessary on Azure runners
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: eu-central-1
          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
          role-duration-seconds: 18000 # 5 hours

      - name: Download Neon artifact
        uses: ./.github/actions/download
        with:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest

      - name: Create Neon Project
        if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
        id: create-neon-project
        uses: ./.github/actions/neon-project-create
        with:
          region_id: ${{ matrix.region_id }}
          postgres_version: ${{ env.DEFAULT_PG_VERSION }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
          compute_units: ${{ (contains(matrix.platform, 'captest-freetier') && '[0.25, 0.25]') || '[1, 1]' }}

      - name: Set up Connection String
        id: set-up-connstr
        run: |
          case "${PLATFORM}" in
            neonvm-captest-reuse)
              CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
              ;;
            neonvm-captest-sharding-reuse)
              CONNSTR=${{ secrets.BENCHMARK_CAPTEST_SHARDING_CONNSTR }}
              ;;
            neonvm-captest-new | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
              CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
              ;;
            rds-aurora)
              CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CONNSTR }}
              ;;
            rds-postgres)
              CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
              ;;
            *)
              echo >&2 "Unknown PLATFORM=${PLATFORM}"
              exit 1
              ;;
          esac

          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

      - name: Benchmark init
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

      - name: Benchmark simple-update
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

      - name: Benchmark select-only
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

      - name: Delete Neon Project
        if: ${{ steps.create-neon-project.outputs.project_id && always() }}
        uses: ./.github/actions/neon-project-delete
        with:
          project_id: ${{ steps.create-neon-project.outputs.project_id }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}

      - name: Create Allure report
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate

      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D" # dev-staging-stream
          slack-message: "Periodic perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
  pgbench-pgvector:
    permissions:
      contents: write
      statuses: write
      id-token: write # Required for OIDC authentication in azure runners
    strategy:
      fail-fast: false
      matrix:
        include:
          - PLATFORM: "neonvm-captest-pgvector"
            RUNNER: [ self-hosted, us-east-2, x64 ]
            IMAGE: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned
          - PLATFORM: "azure-captest-pgvector"
            RUNNER: [ self-hosted, eastus2, x64 ]
            IMAGE: neondatabase/build-tools:pinned

    env:
      TEST_PG_BENCH_DURATIONS_MATRIX: "15m"
      TEST_PG_BENCH_SCALES_MATRIX: "1"
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: 16
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
      LD_LIBRARY_PATH: /home/nonroot/pg/usr/lib/x86_64-linux-gnu
      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.PLATFORM }}

    runs-on: ${{ matrix.RUNNER }}
    container:
      image: ${{ matrix.IMAGE }}
      options: --init

    steps:
      - uses: actions/checkout@v4

      # until https://github.com/neondatabase/neon/issues/8275 is fixed we temporarily install postgresql-16
      # instead of using Neon artifacts containing pgbench
      - name: Install postgresql-16 where pytest expects it
        run: |
          cd /home/nonroot
          wget -q https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-16/libpq5_16.4-1.pgdg110%2B1_amd64.deb
          wget -q https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-16/postgresql-client-16_16.4-1.pgdg110%2B1_amd64.deb
          wget -q https://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-16/postgresql-16_16.4-1.pgdg110%2B1_amd64.deb
          dpkg -x libpq5_16.4-1.pgdg110+1_amd64.deb pg
          dpkg -x postgresql-client-16_16.4-1.pgdg110+1_amd64.deb pg
          dpkg -x postgresql-16_16.4-1.pgdg110+1_amd64.deb pg
          mkdir -p /tmp/neon/pg_install/v16/bin
          ln -s /home/nonroot/pg/usr/lib/postgresql/16/bin/pgbench /tmp/neon/pg_install/v16/bin/pgbench
          ln -s /home/nonroot/pg/usr/lib/postgresql/16/bin/psql /tmp/neon/pg_install/v16/bin/psql
          ln -s /home/nonroot/pg/usr/lib/x86_64-linux-gnu /tmp/neon/pg_install/v16/lib
          /tmp/neon/pg_install/v16/bin/pgbench --version
          /tmp/neon/pg_install/v16/bin/psql --version

      - name: Set up Connection String
        id: set-up-connstr
        run: |
          case "${PLATFORM}" in
            neonvm-captest-pgvector)
              CONNSTR=${{ secrets.BENCHMARK_PGVECTOR_CONNSTR }}
              ;;
            azure-captest-pgvector)
              CONNSTR=${{ secrets.BENCHMARK_PGVECTOR_CONNSTR_AZURE }}
              ;;
            *)
              echo >&2 "Unknown PLATFORM=${PLATFORM}"
              exit 1
              ;;
          esac

          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

      - name: Configure AWS credentials # necessary on Azure runners to read/write from/to S3
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: eu-central-1
          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
          role-duration-seconds: 18000 # 5 hours

      - name: Benchmark pgvector hnsw indexing
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance/test_perf_olap.py
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

      - name: Benchmark pgvector queries
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance/test_perf_pgvector_queries.py
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

      - name: Create Allure report
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate

      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D" # dev-staging-stream
          slack-message: "Periodic perf testing ${PLATFORM}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
  clickbench-compare:
    # ClickBench DB for rds-aurora and rds-postgres deployed to the same clusters
    # we use for performance testing in pgbench-compare.
    # Run this job only when pgbench-compare is finished to avoid the intersection.
    # We might change it after https://github.com/neondatabase/neon/issues/2900.
    #
    # *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows
    # *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB
    if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
    needs: [ generate-matrices, pgbench-compare, prepare_AWS_RDS_databases ]

    strategy:
      fail-fast: false
      matrix: ${{ fromJson(needs.generate-matrices.outputs.olap-compare-matrix) }}

    env:
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: 16
      TEST_OUTPUT: /tmp/test_output
      TEST_OLAP_COLLECT_EXPLAIN: ${{ github.event.inputs.collect_olap_explain }}
      TEST_OLAP_COLLECT_PG_STAT_STATEMENTS: ${{ github.event.inputs.collect_pg_stat_statements }}
      BUILD_TYPE: remote
      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.platform }}

    runs-on: [ self-hosted, us-east-2, x64 ]
    container:
      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned
      options: --init

    steps:
      - uses: actions/checkout@v4

      - name: Download Neon artifact
        uses: ./.github/actions/download
        with:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest

      - name: Set up Connection String
        id: set-up-connstr
        run: |
          case "${PLATFORM}" in
            neonvm-captest-reuse)
              CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_10M_CONNSTR }}
              ;;
            rds-aurora)
              CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CLICKBENCH_10M_CONNSTR }}
              ;;
            rds-postgres)
              CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CLICKBENCH_10M_CONNSTR }}
              ;;
            *)
              echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neonvm-captest-reuse', 'rds-aurora', or 'rds-postgres'"
              exit 1
              ;;
          esac

          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

      - name: ClickBench benchmark
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance/test_perf_olap.py
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_clickbench
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
          TEST_OLAP_COLLECT_EXPLAIN: ${{ github.event.inputs.collect_olap_explain || 'false' }}
          TEST_OLAP_COLLECT_PG_STAT_STATEMENTS: ${{ github.event.inputs.collect_pg_stat_statements || 'false' }}
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          TEST_OLAP_SCALE: 10

      - name: Create Allure report
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate

      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D" # dev-staging-stream
          slack-message: "Periodic OLAP perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
  tpch-compare:
    # TPC-H DB for rds-aurora and rds-postgres deployed to the same clusters
    # we use for performance testing in pgbench-compare & clickbench-compare.
    # Run this job only when clickbench-compare is finished to avoid the intersection.
    # We might change it after https://github.com/neondatabase/neon/issues/2900.
    #
    # *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
    if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
    needs: [ generate-matrices, clickbench-compare, prepare_AWS_RDS_databases ]

    strategy:
      fail-fast: false
      matrix: ${{ fromJson(needs.generate-matrices.outputs.tpch-compare-matrix) }}

    env:
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: 16
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.platform }}
      TEST_OLAP_SCALE: ${{ matrix.scale }}

    runs-on: [ self-hosted, us-east-2, x64 ]
    container:
      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned
      options: --init

    steps:
      - uses: actions/checkout@v4

      - name: Download Neon artifact
        uses: ./.github/actions/download
        with:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest

      - name: Get Connstring Secret Name
        run: |
          case "${PLATFORM}" in
            neonvm-captest-reuse)
              ENV_PLATFORM=CAPTEST_TPCH
              ;;
            rds-aurora)
              ENV_PLATFORM=RDS_AURORA_TPCH
              ;;
            rds-postgres)
              ENV_PLATFORM=RDS_POSTGRES_TPCH
              ;;
            *)
              echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neonvm-captest-reuse', 'rds-aurora', or 'rds-postgres'"
              exit 1
              ;;
          esac

          CONNSTR_SECRET_NAME="BENCHMARK_${ENV_PLATFORM}_S${TEST_OLAP_SCALE}_CONNSTR"
          echo "CONNSTR_SECRET_NAME=${CONNSTR_SECRET_NAME}" >> $GITHUB_ENV

      - name: Set up Connection String
        id: set-up-connstr
        run: |
          CONNSTR=${{ secrets[env.CONNSTR_SECRET_NAME] }}

          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

      - name: Run TPC-H benchmark
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance/test_perf_olap.py
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_tpch
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          TEST_OLAP_SCALE: ${{ matrix.scale }}

      - name: Create Allure report
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate

      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D" # dev-staging-stream
          slack-message: "Periodic TPC-H perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
  user-examples-compare:
    if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
    needs: [ generate-matrices, tpch-compare, prepare_AWS_RDS_databases ]

    strategy:
      fail-fast: false
      matrix: ${{ fromJson(needs.generate-matrices.outputs.olap-compare-matrix) }}

    env:
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: 16
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.platform }}

    runs-on: [ self-hosted, us-east-2, x64 ]
    container:
      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned
      options: --init

    steps:
      - uses: actions/checkout@v4

      - name: Download Neon artifact
        uses: ./.github/actions/download
        with:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest

      - name: Set up Connection String
        id: set-up-connstr
        run: |
          case "${PLATFORM}" in
            neonvm-captest-reuse)
              CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_CAPTEST_CONNSTR }}
              ;;
            rds-aurora)
              CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_AURORA_CONNSTR }}
              ;;
            rds-postgres)
              CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_POSTGRES_CONNSTR }}
              ;;
            *)
              echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neonvm-captest-reuse', 'rds-aurora', or 'rds-postgres'"
              exit 1
              ;;
          esac

          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

      - name: Run user examples
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance/test_perf_olap.py
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
          pg_version: ${{ env.DEFAULT_PG_VERSION }}
        env:
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

      - name: Create Allure report
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate

      - name: Post to a Slack channel
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D" # dev-staging-stream
          slack-message: "Periodic User example perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}