mirror of https://github.com/neondatabase/neon.git (synced 2026-01-14 08:52:56 +00:00)
Merge batch_others and batch_pg_regress.

The original idea was to split all the python tests into multiple "batches" and run each batch in parallel as a separate CI job. However, the batch_pg_regress batch was pretty short compared to all the tests in batch_others. We could split batch_others into multiple batches, but it seems better to treat them all as one big pool of tests and let pytest handle the parallelism on its own. If we need to split them across multiple nodes in the future, we could use pytest-shard or something similar instead of managing the batches ourselves.

Also merge test_neon_regress.py, test_pg_regress.py and test_isolation.py into one file, test_pg_regress.py. It seems clearer to group all pg_regress-based tests into one file, now that they all live in the same directory.
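As a concrete sketch of the difference (the -n flag comes from pytest-xdist, which the CI invocation below already passes; the pytest-shard flags are only illustrative of the possible future setup):

    # one pool of tests, four local worker processes via pytest-xdist
    ./scripts/pytest -n4 test_runner/regress

    # hypothetical future sharding across CI nodes with pytest-shard
    ./scripts/pytest --shard-id=0 --num-shards=4 test_runner/regress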
173 lines
6.0 KiB
YAML
name: 'Run python test'
description: 'Runs a Neon python test set, performing all the required preparations beforehand'

inputs:
  build_type:
    description: 'Type of Rust (neon) and C (postgres) builds. Must be "release" or "debug".'
    required: true
  rust_toolchain:
    description: 'Rust toolchain version to fetch the caches'
    required: true
  test_selection:
    description: 'A python test suite to run'
    required: true
  extra_params:
    description: 'Arbitrary parameters to pytest. For example "-s" to prevent capturing stdout/stderr'
    required: false
    default: ''
  needs_postgres_source:
    description: 'Set to true if the test suite requires the postgres source checked out'
    required: false
    default: 'false'
  run_in_parallel:
    description: 'Whether to run tests in parallel'
    required: false
    default: 'true'
  save_perf_report:
    description: 'Whether to upload the performance report'
    required: false
    default: 'false'
  run_with_real_s3:
    description: 'Whether to pass real s3 credentials to the test suite'
    required: false
    default: 'false'
  real_s3_bucket:
    description: 'Bucket name for real s3 tests'
    required: false
    default: ''
  real_s3_region:
    description: 'Region name for real s3 tests'
    required: false
    default: ''
  real_s3_access_key_id:
    description: 'Access key id for real s3 tests'
    required: false
    default: ''
  real_s3_secret_access_key:
    description: 'Secret access key for real s3 tests'
    required: false
    default: ''
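
# Example of calling this action from a workflow (an illustrative sketch, not
# taken from this repo: the step name, the action's directory name and the
# input values are assumptions; the inputs themselves are defined above):
#
#   - name: Run regress tests
#     uses: ./.github/actions/run-python-test-set
#     with:
#       build_type: release
#       rust_toolchain: stable
#       test_selection: regress
#       needs_postgres_source: 'true'
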
runs:
  using: "composite"
  steps:
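    # Fetch the neon build produced by an earlier job. The artifact is assumed
    # to contain the neon binaries under bin/ and the postgres installation
    # under pg_install/; the NEON_BIN and POSTGRES_DISTRIB_DIR variables of the
    # pytest step below point into it.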
    - name: Get Neon artifact
      uses: ./.github/actions/download
      with:
        name: neon-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.rust_toolchain }}-artifact
        path: /tmp/neon
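
    # Only suites that need the postgres source tree (presumably the
    # pg_regress-based tests) check out the repo; everything else runs from
    # the downloaded artifact alone.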
    - name: Checkout
      if: inputs.needs_postgres_source == 'true'
      uses: actions/checkout@v3
      with:
        submodules: true
        fetch-depth: 1
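
    # The cache key hashes poetry.lock, so any dependency change invalidates
    # the cached virtualenvs and forces a clean reinstall below.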
    - name: Cache poetry deps
      id: cache_poetry
      uses: actions/cache@v3
      with:
        path: ~/.cache/pypoetry/virtualenvs
        key: v1-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }}
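
    # scripts/pysync is assumed to be a wrapper around `poetry install` that
    # populates the virtualenv cached above.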
    - name: Install Python deps
      shell: bash -euxo pipefail {0}
      run: ./scripts/pysync

    - name: Run pytest
      env:
        NEON_BIN: /tmp/neon/bin
        POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
        TEST_OUTPUT: /tmp/test_output
        # this variable will be embedded in the perf test report
        # and is needed to distinguish different environments
        PLATFORM: github-actions-selfhosted
        BUILD_TYPE: ${{ inputs.build_type }}
        AWS_ACCESS_KEY_ID: ${{ inputs.real_s3_access_key_id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.real_s3_secret_access_key }}
      shell: bash -euxo pipefail {0}
      run: |
        PERF_REPORT_DIR="$(realpath test_runner/perf-report-local)"
        rm -rf "$PERF_REPORT_DIR"

        TEST_SELECTION="test_runner/${{ inputs.test_selection }}"
        EXTRA_PARAMS="${{ inputs.extra_params }}"
        # fail early if no test selection was given (TEST_SELECTION itself
        # always starts with "test_runner/", so test the input instead)
        if [ -z "${{ inputs.test_selection }}" ]; then
          echo "test_selection must be set"
          exit 1
        fi
        if [[ "${{ inputs.run_in_parallel }}" == "true" ]]; then
          EXTRA_PARAMS="-n4 $EXTRA_PARAMS"
        fi
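
        # Judging by the literal value, the test suite only requires
        # ENABLE_REAL_S3_REMOTE_STORAGE to be non-empty; the bucket and region
        # are taken from the exports below.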
if [[ "${{ inputs.run_with_real_s3 }}" == "true" ]]; then
|
|
echo "REAL S3 ENABLED"
|
|
export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty
|
|
export REMOTE_STORAGE_S3_BUCKET=${{ inputs.real_s3_bucket }}
|
|
export REMOTE_STORAGE_S3_REGION=${{ inputs.real_s3_region }}
|
|
fi
|
|
|
|
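
        # Perf results are collected into a local directory here and pushed
        # after the test run below; both halves are gated to
        # save_perf_report == true on the main branch.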
if [[ "${{ inputs.save_perf_report }}" == "true" ]]; then
|
|
if [[ "$GITHUB_REF" == "refs/heads/main" ]]; then
|
|
mkdir -p "$PERF_REPORT_DIR"
|
|
EXTRA_PARAMS="--out-dir $PERF_REPORT_DIR $EXTRA_PARAMS"
|
|
fi
|
|
fi
|
|
|
|
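
        # cov_prefix is a bash array: for debug builds it wraps pytest in the
        # coverage collector, while for release builds the empty array expands
        # to nothing in "${cov_prefix[@]}" below.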
if [[ "${{ inputs.build_type }}" == "debug" ]]; then
|
|
cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run)
|
|
elif [[ "${{ inputs.build_type }}" == "release" ]]; then
|
|
cov_prefix=()
|
|
fi
|
|
|
|
# Run the tests.
|
|
#
|
|
# The junit.xml file allows CI tools to display more fine-grained test information
|
|
# in its "Tests" tab in the results page.
|
|
# --verbose prints name of each test (helpful when there are
|
|
# multiple tests in one file)
|
|
# -rA prints summary in the end
|
|
# -n4 uses four processes to run tests via pytest-xdist
|
|
# -s is not used to prevent pytest from capturing output, because tests are running
|
|
# in parallel and logs are mixed between different tests
|
|
mkdir -p $TEST_OUTPUT/allure/results
|
|
"${cov_prefix[@]}" ./scripts/pytest \
|
|
--junitxml=$TEST_OUTPUT/junit.xml \
|
|
--alluredir=$TEST_OUTPUT/allure/results \
|
|
--tb=short \
|
|
--verbose \
|
|
-m "not remote_cluster" \
|
|
-rA $TEST_SELECTION $EXTRA_PARAMS
|
|
|
|
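
        # Push the perf report collected above; REPORT_FROM/REPORT_TO are
        # presumably read by generate_and_push_perf_report.sh (same
        # main-branch gate as above).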
if [[ "${{ inputs.save_perf_report }}" == "true" ]]; then
|
|
if [[ "$GITHUB_REF" == "refs/heads/main" ]]; then
|
|
export REPORT_FROM="$PERF_REPORT_DIR"
|
|
export REPORT_TO=local
|
|
scripts/generate_and_push_perf_report.sh
|
|
fi
|
|
fi
|
|
|
|
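
    # always() keeps this step running even when pytest fails, so failed runs
    # still get an Allure report; it is limited to the 'regress' selection,
    # presumably the only suite whose results are published this way.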
    - name: Upload Allure results
      if: ${{ always() && (inputs.test_selection == 'regress') }}
      uses: ./.github/actions/allure-report
      with:
        action: store
        build_type: ${{ inputs.build_type }}
        test_selection: ${{ inputs.test_selection }}

    - name: Delete all data but logs
      shell: bash -euxo pipefail {0}
      if: always()
      run: |
        du -sh /tmp/test_output/*
        find /tmp/test_output -type f ! -name "*.log" ! -name "regression.diffs" ! -name "junit.xml" ! -name "*.filediff" ! -name "*.stdout" ! -name "*.stderr" ! -name "flamegraph.svg" ! -name "*.metrics" -delete
        du -sh /tmp/test_output/*
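
    # Only the files spared by the cleanup step above (logs, diffs, junit.xml,
    # flamegraphs, metrics) remain in /tmp/test_output, which keeps the
    # uploaded artifact small.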
    - name: Upload python test logs
      if: always()
      uses: ./.github/actions/upload
      with:
        name: python-test-${{ inputs.test_selection }}-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.rust_toolchain }}-logs
        path: /tmp/test_output/