Compare commits

...

6 Commits

Author SHA1 Message Date
Alexander Lakhin  eba6f85909  Add flaky tests to test how testing for flaky tests works  2025-05-14 11:31:45 +03:00
Alexander Lakhin  52888e06a0  Run testing of new tests within "Build and Test" workflow  2025-05-14 11:31:44 +03:00
Alexander Lakhin  2b51b7cbb1  Transform "Build and Run Selected Test" workflow into "Build and Test Tests"  2025-05-14 11:31:44 +03:00
Alexander Lakhin  5895abf495  Allow for adding optional postfix to allure report name  2025-05-14 11:31:44 +03:00
Alexander Lakhin  05559539f1  Adjust regress-tests step to pass list of selected tests  2025-05-14 11:31:43 +03:00
Alexander Lakhin  8f2808864b  Allow for running multiple selected tests  2025-05-14 11:31:43 +03:00
9 changed files with 243 additions and 19 deletions

View File

@@ -43,7 +43,7 @@ runs:
BUCKET: neon-github-public-dev
run: |
if [ -n "${PR_NUMBER}" ]; then
BRANCH_OR_PR=pr-${PR_NUMBER}
BRANCH_OR_PR=pr-${PR_NUMBER}${REPORT_EXT-}
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || \
[ "${GITHUB_REF_NAME}" = "release-proxy" ] || [ "${GITHUB_REF_NAME}" = "release-compute" ]; then
# Shortcut for special branches

View File

@@ -23,7 +23,7 @@ runs:
REPORT_DIR: ${{ inputs.report-dir }}
run: |
if [ -n "${PR_NUMBER}" ]; then
BRANCH_OR_PR=pr-${PR_NUMBER}
BRANCH_OR_PR=pr-${PR_NUMBER}${REPORT_EXT-}
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || \
[ "${GITHUB_REF_NAME}" = "release-proxy" ] || [ "${GITHUB_REF_NAME}" = "release-compute" ]; then
# Shortcut for special branches

View File

@@ -12,6 +12,10 @@ inputs:
description: 'Arbitrary parameters to pytest. For example "-s" to prevent capturing stdout/stderr'
required: false
default: ''
extended_testing:
description: 'Set to true if the test results should be stored and processed separately'
required: false
default: 'false'
needs_postgres_source:
description: 'Set to true if the test suite requires postgres source checked out'
required: false
@@ -135,13 +139,15 @@ runs:
PERF_REPORT_DIR="$(realpath test_runner/perf-report-local)"
echo "PERF_REPORT_DIR=${PERF_REPORT_DIR}" >> ${GITHUB_ENV}
rm -rf $PERF_REPORT_DIR
TEST_SELECTION="test_runner/${{ inputs.test_selection }}"
EXTRA_PARAMS="${{ inputs.extra_params }}"
TEST_SELECTION="${{ inputs.test_selection }}"
if [ -z "$TEST_SELECTION" ]; then
echo "test_selection must be set"
exit 1
fi
if [[ $TEST_SELECTION != test_runner/* ]]; then
TEST_SELECTION="test_runner/$TEST_SELECTION"
fi
EXTRA_PARAMS="${{ inputs.extra_params }}"
if [[ "${{ inputs.run_in_parallel }}" == "true" ]]; then
# -n sets the number of parallel processes that pytest-xdist will run
EXTRA_PARAMS="-n12 $EXTRA_PARAMS"
@@ -244,3 +250,5 @@ runs:
report-dir: /tmp/test_output/allure/results
unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}-${{ runner.arch }}
aws-oidc-role-arn: ${{ inputs.aws-oidc-role-arn }}
env:
REPORT_EXT: ${{ inputs.extended_testing == 'true' && '-ext' || '' }}
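
For readers skimming the hunk above: the step now accepts either a suite name (the old "regress" default) or full pytest node IDs, and only prepends test_runner/ when the selection does not already carry it; when extended_testing is true, the Allure upload additionally gets an -ext suffix so these runs are stored and processed separately. A minimal Python sketch of the selection normalization, purely illustrative (the real logic is the bash above; the helper name is made up):

def normalize_selection(selection: str) -> str:
    # Hypothetical helper mirroring the shell step: fail fast on an empty
    # selection and make it relative to test_runner/ unless it already is.
    if not selection:
        raise SystemExit("test_selection must be set")
    if not selection.startswith("test_runner/"):
        selection = f"test_runner/{selection}"
    return selection

assert normalize_selection("regress") == "test_runner/regress"
spec = "test_runner/regress/test_pg_regress.py::test_pg_regress"
assert normalize_selection(spec) == spec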

.github/scripts/detect-updated-pytests.py (143 lines) vendored Executable file
View File

@@ -0,0 +1,143 @@
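"""Detect new and updated pytest tests under test_runner/regress/ between the PR head
(COMMIT_SHA) and its merge-base with the base branch (BASE_SHA), and expose them via
GITHUB_OUTPUT as a space-separated `tests` list."""
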
import os
import re
import shutil
import subprocess
import sys

commit_sha = os.getenv("COMMIT_SHA")
base_sha = os.getenv("BASE_SHA")

cmd = ["git", "merge-base", base_sha, commit_sha]
print(f"Running: {' '.join(cmd)}...")
result = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if result.returncode != 0 or not (baseline := result.stdout.strip()):
    print("Baseline commit for PR is not found, detection skipped.")
    sys.exit(0)
print(f"Baseline commit: {baseline}")

cmd = ["git", "diff", "--name-only", f"{baseline}..{commit_sha}", "test_runner/regress/"]
print(f"Running: {' '.join(cmd)}...")
result = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if result.returncode != 0:
    print(f"Git diff returned code {result.returncode}\n{result.stdout}\nDetection skipped.")
    sys.exit(0)


def collect_tests(test_file_name):
    cmd = ["./scripts/pytest", "--collect-only", "-q", test_file_name]
    print(f"Running: {' '.join(cmd)}...")
    result = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if result.returncode != 0:
        print(
            f"pytest --collect-only returned code {result.returncode}\n{result.stdout}\nDetection skipped."
        )
        sys.exit(0)
    tests = []
    for test_item in result.stdout.split("\n"):
        if not test_item.startswith(test_file_name):
            break
        test_name = re.sub(r"(.*::)([^\[]+)(\[.*)?", r"\2", test_item)
        if test_name not in tests:
            tests.append(test_name)
    return tests


all_new_tests = []
all_updated_tests = []
temp_test_file = "test_runner/regress/__temp__.py"
temp_file = None

for test_file in result.stdout.split("\n"):
    if not test_file:
        continue
    print(f"Test file modified: {test_file}.")
    # Get and compare two lists of items collected by pytest to detect new tests in the PR
    if temp_file:
        temp_file.close()
    temp_file = open(temp_test_file, "w")
    cmd = ["git", "show", f"{baseline}:{test_file}"]
    print(f"Running: {' '.join(cmd)}...")
    result = subprocess.run(cmd, text=True, stdout=temp_file)
    if result.returncode != 0:
        tests0 = []
    else:
        tests0 = collect_tests(temp_test_file)
    tests1 = collect_tests(test_file)
    new_tests = set(tests1).difference(tests0)
    for test_name in new_tests:
        all_new_tests.append(f"{test_file}::{test_name}")

    # Detect pre-existing test functions updated in the PR
    cmd = ["git", "diff", f"{baseline}..{commit_sha}", test_file]
    print(f"Running: {' '.join(cmd)}...")
    result = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if result.returncode != 0:
        print(f"Git diff returned code {result.returncode}\n{result.stdout}\nDetection skipped.")
        sys.exit(0)
    updated_funcs = []
    for diff_line in result.stdout.split("\n"):
        print(diff_line)
        # TODO: detect functions with added/modified parameters
        if not diff_line.startswith("@@"):
            continue
        # Extract names of functions with updated content relying on hunk header
        m = re.match(r"^(@@[0-9, +-]+@@ def )([^(]+)(.*)", diff_line)
        if not m:
            continue
        func_name = m.group(2)
        print(func_name)
        # Ignore functions not collected by pytest
        if func_name not in tests1:
            continue
        if func_name not in updated_funcs:
            updated_funcs.append(func_name)

    for func_name in updated_funcs:
        print(f"Function modified: {func_name}.")
        # Extract changes within the function
        cmd = ["git", "log", f"{baseline}..{commit_sha}", "-L", f":{func_name}:{test_file}"]
        print(f"Running: {' '.join(cmd)}...")
        result = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        if result.returncode != 0:
            continue
        patch_contents = result.stdout
        # Revert changes to get the file with only this function updated
        # (applying the patch might fail if it contains a change for the next function declaration)
        shutil.copy(test_file, temp_test_file)
        cmd = ["patch", "-R", "-p1", "--no-backup-if-mismatch", "-r", "/dev/null", temp_test_file]
        print(f"Running: {' '.join(cmd)}...")
        result = subprocess.run(
            cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, input=patch_contents
        )
        print(f"result: {result.returncode}; {result.stdout}")
        if result.returncode != 0:
            continue
        # Ignore whitespace-only changes
        cmd = ["diff", "-w", test_file, temp_test_file]
        print(f"Running: {' '.join(cmd)}...")
        result = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        if result.returncode == 0:
            continue
        all_updated_tests.append(f"{test_file}::{func_name}")

if temp_file:
    temp_file.close()
if os.path.exists(temp_test_file):
    os.remove(temp_test_file)

if github_output := os.getenv("GITHUB_OUTPUT"):
    with open(github_output, "a") as f:
        if all_new_tests or all_updated_tests:
            f.write("tests=")
            f.write(" ".join(all_new_tests + all_updated_tests))
            f.write("\n")

View File

@@ -266,7 +266,7 @@ jobs:
role-duration-seconds: 18000 # 5 hours
- name: Run rust tests
if: ${{ inputs.sanitizers != 'enabled' }}
if: ${{ inputs.sanitizers != 'enabled' && inputs.test-selection == '' }}
env:
NEXTEST_RETRIES: 3
run: |
@@ -386,7 +386,8 @@ jobs:
timeout-minutes: ${{ inputs.sanitizers != 'enabled' && 75 || 180 }}
with:
build_type: ${{ inputs.build-type }}
test_selection: regress
test_selection: ${{ inputs.test-selection != '' && inputs.test-selection || 'regress' }}
extended_testing: ${{ inputs.test-selection != '' && 'true' || 'false' }}
needs_postgres_source: true
run_with_real_s3: true
real_s3_bucket: neon-github-ci-tests
@@ -399,9 +400,7 @@ jobs:
# Attempt to stop tests gracefully to generate test reports
# until they are forcibly stopped by the stricter `timeout-minutes` limit.
extra_params: --session-timeout=${{ inputs.sanitizers != 'enabled' && 3000 || 10200 }} --count=${{ inputs.test-run-count }}
${{ inputs.test-selection != '' && format('-k "{0}"', inputs.test-selection) || '' }}
env:
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
BUILD_TAG: ${{ inputs.build-tag }}
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring

View File

@@ -199,6 +199,12 @@ jobs:
build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
secrets: inherit
build-and-test-new-tests:
needs: [ meta, build-build-tools-image ]
if: github.event_name == 'pull_request'
uses: ./.github/workflows/build_and_test_tests.yml
secrets: inherit
build-and-test-locally:
needs: [ meta, build-build-tools-image ]
# We do need to run this in `.*-rc-pr` because of hotfixes.

View File

@@ -1,10 +1,10 @@
name: Build and Run Selected Test
name: Build and Run Selected Tests
on:
workflow_dispatch:
inputs:
test-selection:
description: 'Specification of selected test(s), as accepted by pytest -k'
description: 'Specification of selected test(s), e.g. test_runner/regress/test_pg_regress.py::test_pg_regress'
required: true
type: string
run-count:
@@ -26,6 +26,8 @@ on:
default: '[{"pg_version":"v17"}]'
required: true
type: string
workflow_call:
pull_request: # TODO: remove before merge
defaults:
run:
@@ -42,26 +44,71 @@ jobs:
github-event-name: ${{ github.event_name }}
github-event-json: ${{ toJSON(github.event) }}
build-and-test-locally:
needs: [ meta ]
choose-test-parameters:
runs-on: [ self-hosted, small ]
container:
image: ghcr.io/neondatabase/build-tools:pinned-bookworm
credentials:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
options: --init
outputs:
tests: ${{ inputs.test-selection != '' && inputs.test-selection || steps.detect_tests_to_test.outputs.tests }}
archs: ${{ inputs.test-selection != '' && inputs.archs || '["x64", "arm64"]' }}
build-types: ${{ inputs.test-selection != '' && inputs.build-types || '["release"]' }}
pg-versions: ${{ inputs.test-selection != '' && inputs.pg-versions || '[{"pg_version":"v14"}, {"pg_version":"v17"}]' }}
run-count: ${{ inputs.test-selection != '' && inputs.run-count || 5 }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: inputs.test-selection == ''
with:
submodules: false
clean: false
fetch-depth: 1000
- name: Cache poetry deps
if: inputs.test-selection == ''
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry/virtualenvs
key: v2-${{ runner.os }}-${{ runner.arch }}-python-deps-bookworm-${{ hashFiles('poetry.lock') }}
- name: Install Python deps
if: inputs.test-selection == ''
shell: bash -euxo pipefail {0}
run: ./scripts/pysync
- name: Detect new and updated tests
id: detect_tests_to_test
if: github.event.pull_request.head.sha && inputs.test-selection == ''
env:
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
BASE_SHA: ${{ github.event.pull_request.base.sha || github.sha }}
run: python3 .github/scripts/detect-updated-pytests.py
build-and-test-tests:
needs: [ meta, choose-test-parameters ]
if: needs.choose-test-parameters.outputs.tests != ''
strategy:
fail-fast: false
matrix:
arch: ${{ fromJson(inputs.archs) }}
build-type: ${{ fromJson(inputs.build-types) }}
arch: ${{ fromJson(needs.choose-test-parameters.outputs.archs) }}
build-type: ${{ fromJson(needs.choose-test-parameters.outputs.build-types) }}
uses: ./.github/workflows/_build-and-test-locally.yml
with:
arch: ${{ matrix.arch }}
build-tools-image: ghcr.io/neondatabase/build-tools:pinned-bookworm
build-tag: ${{ needs.meta.outputs.build-tag }}
build-type: ${{ matrix.build-type }}
test-cfg: ${{ inputs.pg-versions }}
test-selection: ${{ inputs.test-selection }}
test-run-count: ${{ fromJson(inputs.run-count) }}
test-cfg: ${{ needs.choose-test-parameters.outputs.pg-versions }}
test-selection: ${{ needs.choose-test-parameters.outputs.tests }}
test-run-count: ${{ fromJson(needs.choose-test-parameters.outputs.run-count) }}
secrets: inherit
create-test-report:
needs: [ build-and-test-locally ]
needs: [ build-and-test-tests ]
if: ${{ !cancelled() }}
permissions:
id-token: write # aws-actions/configure-aws-credentials
@@ -96,6 +143,7 @@ jobs:
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_DEV }}
REPORT_EXT: '-ext'
- uses: actions/github-script@v7
if: ${{ !cancelled() }}
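
To make the data flow above concrete: the detect step fills the `tests` job output, which feeds `test-selection` of the reusable build-and-test workflow and ultimately the pytest invocation. A hedged sketch of exercising the detection step locally, assuming it runs from the repository root so that ./scripts/pytest and test_runner/regress/ resolve; the refs, temp output file, and example node ID below are placeholders, while the real workflow sets COMMIT_SHA/BASE_SHA from the pull_request event:

import os
import subprocess
import tempfile

# Placeholder refs standing in for the PR head and base SHAs the workflow provides.
env = dict(os.environ, COMMIT_SHA="HEAD", BASE_SHA="origin/main")
with tempfile.NamedTemporaryFile("w+", suffix=".out") as out:
    env["GITHUB_OUTPUT"] = out.name
    subprocess.run(
        ["python3", ".github/scripts/detect-updated-pytests.py"], env=env, check=False
    )
    out.seek(0)
    # Expected shape of the output line (node IDs are placeholders):
    # tests=test_runner/regress/test_pg_regress.py::test_pg_regress
    print(out.read())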

View File

@@ -0,0 +1,11 @@
"""Test for detecting new flaky tests"""
import random
def test_flaky1():
assert random.random() > 0.05
def no_test_flaky2():
assert random.random() > 0.05

View File

@@ -11,6 +11,9 @@ if TYPE_CHECKING:
# Test that pageserver and safekeeper can restart quickly.
# This is a regression test, see https://github.com/neondatabase/neon/issues/2247
def test_fixture_restart(neon_env_builder: NeonEnvBuilder):
    import random

    assert random.random() > 0.05
    env = neon_env_builder.init_start()
    for _ in range(3):
@@ -20,3 +23,9 @@ def test_fixture_restart(neon_env_builder: NeonEnvBuilder):
    for _ in range(3):
        env.safekeepers[0].stop()
        env.safekeepers[0].start()


def test_flaky3():
    import random

    assert random.random() > 0.05
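
As a back-of-the-envelope check on why these synthetic assertions make a reasonable probe: each injected assert fails about 5% of the time, and with the defaults chosen for the pull_request path above (run-count 5, two architectures, one build type, two Postgres versions) a selected test executes roughly 20 times, so the chance that at least one execution fails is:

# Probability that at least one of n independent executions of a test that
# fails 5% of the time actually fails: 1 - 0.95**n.
p = 0.05
for n in (1, 5, 20):  # 1 run; 5 repetitions; 5 repetitions x 2 archs x 2 PG versions
    print(n, round(1 - (1 - p) ** n, 3))
# 1 0.05
# 5 0.226
# 20 0.642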