name: 'Run python test'
description: 'Runs a Neon python test set, performing all the required preparations before'

inputs:
  build_type:
    description: 'Type of Rust (neon) and C (postgres) builds. Must be "release" or "debug", or "remote" for the remote cluster'
    required: true
  test_selection:
    description: 'A python test suite to run'
    required: true
  extra_params:
    description: 'Arbitrary parameters to pytest. For example "-s" to prevent capturing stdout/stderr'
    required: false
    default: ''
  needs_postgres_source:
    description: 'Set to true if the test suite requires postgres source checked out'
    required: false
    default: 'false'
  run_in_parallel:
    description: 'Whether to run tests in parallel'
    required: false
    default: 'true'
  save_perf_report:
    description: 'Whether to upload the performance report; if true, the PERF_TEST_RESULT_CONNSTR env variable should be set'
    required: false
    default: 'false'
  run_with_real_s3:
    description: 'Whether to pass real s3 credentials to the test suite'
    required: false
    default: 'false'
  real_s3_bucket:
    description: 'Bucket name for real s3 tests'
    required: false
    default: ''
  real_s3_region:
    description: 'Region name for real s3 tests'
    required: false
    default: ''
  rerun_flaky:
    description: 'Whether to rerun flaky tests'
    required: false
    default: 'false'
  pg_version:
    description: 'Postgres version to use for tests'
    required: false
    default: 'v14'

runs:
  using: "composite"
  steps:
    - name: Get Neon artifact
      if: inputs.build_type != 'remote'
      uses: ./.github/actions/download
      with:
        name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
        path: /tmp/neon

    - name: Download Neon binaries for the previous release
      if: inputs.build_type != 'remote'
      uses: ./.github/actions/download
      with:
        name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
        path: /tmp/neon-previous
        prefix: latest

    - name: Download compatibility snapshot
      if: inputs.build_type != 'remote'
      uses: ./.github/actions/download
      with:
        name: compatibility-snapshot-${{ inputs.build_type }}-pg${{ inputs.pg_version }}
        path: /tmp/compatibility_snapshot_pg${{ inputs.pg_version }}
        prefix: latest

    - name: Checkout
      if: inputs.needs_postgres_source == 'true'
      uses: actions/checkout@v3
      with:
        submodules: true
        fetch-depth: 1

    - name: Cache poetry deps
      id: cache_poetry
      uses: actions/cache@v3
      with:
        path: ~/.cache/pypoetry/virtualenvs
        key: v1-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }}

    - name: Install Python deps
      shell: bash -euxo pipefail {0}
      run: ./scripts/pysync

    - name: Run pytest
      env:
        NEON_BIN: /tmp/neon/bin
        COMPATIBILITY_NEON_BIN: /tmp/neon-previous/bin
        COMPATIBILITY_POSTGRES_DISTRIB_DIR: /tmp/neon-previous/pg_install
        TEST_OUTPUT: /tmp/test_output
        BUILD_TYPE: ${{ inputs.build_type }}
        COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg${{ inputs.pg_version }}
        ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE: ${{ contains(github.event.pull_request.labels.*.name, 'backward compatibility breakage') }}
        ALLOW_FORWARD_COMPATIBILITY_BREAKAGE: ${{ contains(github.event.pull_request.labels.*.name, 'forward compatibility breakage') }}
        RERUN_FLAKY: ${{ inputs.rerun_flaky }}
        PG_VERSION: ${{ inputs.pg_version }}
      shell: bash -euxo pipefail {0}
      run: |
        # PLATFORM is embedded in the perf test report
        # and is needed to distinguish different environments
        export PLATFORM=${PLATFORM:-github-actions-selfhosted}
        export POSTGRES_DISTRIB_DIR=${POSTGRES_DISTRIB_DIR:-/tmp/neon/pg_install}
        # Strip the leading "v" from the version, e.g. "v14" -> "14"
        export DEFAULT_PG_VERSION=${PG_VERSION#v}

        if [ "${BUILD_TYPE}" = "remote" ]; then
          export REMOTE_ENV=1
        fi

        PERF_REPORT_DIR="$(realpath test_runner/perf-report-local)"
        rm -rf "$PERF_REPORT_DIR"
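
        # The conditional blocks below assemble EXTRA_PARAMS: each one prepends
        # extra pytest flags (xdist parallelism, perf report output directory,
        # flaky-test and benchmark-duration data) that are all passed to the
        # final ./scripts/pytest invocation.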
        if [ -z "${{ inputs.test_selection }}" ]; then
          echo "test_selection must be set"
          exit 1
        fi
        TEST_SELECTION="test_runner/${{ inputs.test_selection }}"
        EXTRA_PARAMS="${{ inputs.extra_params }}"

        if [[ "${{ inputs.run_in_parallel }}" == "true" ]]; then
          # -n16 uses sixteen processes to run tests via pytest-xdist
          EXTRA_PARAMS="-n16 $EXTRA_PARAMS"

          # --dist=loadgroup pins tests marked with @pytest.mark.xdist_group
          # to the same worker to make @pytest.mark.order work with xdist
          EXTRA_PARAMS="--dist=loadgroup $EXTRA_PARAMS"
        fi

        if [[ "${{ inputs.run_with_real_s3 }}" == "true" ]]; then
          echo "REAL S3 ENABLED"
          export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty
          export REMOTE_STORAGE_S3_BUCKET=${{ inputs.real_s3_bucket }}
          export REMOTE_STORAGE_S3_REGION=${{ inputs.real_s3_region }}
        fi

        if [[ "${{ inputs.save_perf_report }}" == "true" ]]; then
          mkdir -p "$PERF_REPORT_DIR"
          EXTRA_PARAMS="--out-dir $PERF_REPORT_DIR $EXTRA_PARAMS"
        fi

        if [ "${RERUN_FLAKY}" == "true" ]; then
          mkdir -p $TEST_OUTPUT
          poetry run ./scripts/flaky_tests.py "${TEST_RESULT_CONNSTR}" \
            --days 10 \
            --output "$TEST_OUTPUT/flaky.json"
          EXTRA_PARAMS="--flaky-tests-json $TEST_OUTPUT/flaky.json $EXTRA_PARAMS"
        fi

        # We use the pytest-split plugin to run benchmarks in parallel on different CI runners
        if [ "${TEST_SELECTION}" = "test_runner/performance" ] && [ "${{ inputs.build_type }}" != "remote" ]; then
          mkdir -p $TEST_OUTPUT
          poetry run ./scripts/benchmark_durations.py "${TEST_RESULT_CONNSTR}" \
            --days 10 \
            --output "$TEST_OUTPUT/benchmark_durations.json"
          EXTRA_PARAMS="--durations-path $TEST_OUTPUT/benchmark_durations.json $EXTRA_PARAMS"
        fi

        # Collect code coverage only for debug builds
        if [[ "${{ inputs.build_type }}" == "debug" ]]; then
          cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run)
        elif [[ "${{ inputs.build_type }}" == "release" ]]; then
          cov_prefix=()
        else
          cov_prefix=()
        fi

        # Wake up the cluster if we use a remote Neon instance
        if [ "${{ inputs.build_type }}" = "remote" ] && [ -n "${BENCHMARK_CONNSTR}" ]; then
          ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/psql ${BENCHMARK_CONNSTR} -c "SELECT version();"
        fi

        # Run the tests.
        #
        # The junit.xml file allows CI tools to display more fine-grained test information
        # in the "Tests" tab of the results page.
        # --verbose prints the name of each test (helpful when there are
        # multiple tests in one file)
        # -rA prints a summary at the end
        # -s is deliberately not passed: it would stop pytest from capturing output,
        # and since tests run in parallel, the logs of different tests would get mixed together
        mkdir -p $TEST_OUTPUT/allure/results
        "${cov_prefix[@]}" ./scripts/pytest \
          --junitxml=$TEST_OUTPUT/junit.xml \
          --alluredir=$TEST_OUTPUT/allure/results \
          --tb=short \
          --verbose \
          -rA $TEST_SELECTION $EXTRA_PARAMS

        if [[ "${{ inputs.save_perf_report }}" == "true" ]]; then
          export REPORT_FROM="$PERF_REPORT_DIR"
          export REPORT_TO="$PLATFORM"
          scripts/generate_and_push_perf_report.sh
        fi

    - name: Upload compatibility snapshot
      if: github.ref_name == 'release'
      uses: ./.github/actions/upload
      with:
        name: compatibility-snapshot-${{ inputs.build_type }}-pg${{ inputs.pg_version }}-${{ github.run_id }}
        # The directory is created by test_compatibility.py::test_create_snapshot; keep the path in sync with the test
        path: /tmp/test_output/compatibility_snapshot_pg${{ inputs.pg_version }}/
        prefix: latest

    - name: Upload test results
      if: ${{ !cancelled() }}
      uses: ./.github/actions/allure-report-store
      with:
        report-dir: /tmp/test_output/allure/results
        unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}
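
# Example invocation from a caller workflow (a minimal sketch; the action path
# ".github/actions/run-python-test-set" and the "regress" suite name are
# assumptions, not taken from this file -- use the directory this action
# actually lives in and a suite that exists under test_runner/):
#
#   - name: Run python tests
#     uses: ./.github/actions/run-python-test-set
#     with:
#       build_type: release
#       test_selection: regress
#       run_in_parallel: 'true'
#       pg_version: v14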