Compare commits

..

1 Commits

Author SHA1 Message Date
Tristan Partin
e684d078d7 Fix variable substitution in compute-node.Dockerfile
Signed-off-by: Tristan Partin <tristan.partin@databricks.com>
2025-06-23 21:07:26 +00:00
26 changed files with 112 additions and 214 deletions

View File

@@ -54,7 +54,7 @@ runs:
LOCK_FILE=reports/${BRANCH_OR_PR}/lock.txt
WORKDIR=/__w/_temp/${BRANCH_OR_PR}-$(date +%s)
WORKDIR=/tmp/${BRANCH_OR_PR}-$(date +%s)
mkdir -p ${WORKDIR}
echo "BRANCH_OR_PR=${BRANCH_OR_PR}" >> $GITHUB_ENV
@@ -70,7 +70,7 @@ runs:
- name: Install Allure
shell: bash -euxo pipefail {0}
working-directory: /__w/_temp
working-directory: /tmp
run: |
if ! which allure; then
ALLURE_ZIP=allure-${ALLURE_VERSION}.zip

View File

@@ -33,7 +33,7 @@ runs:
shell: bash -euxo pipefail {0}
env:
TARGET: ${{ inputs.path }}
ARCHIVE: /__w/_temp/downloads/${{ inputs.name }}.tar.zst
ARCHIVE: /tmp/downloads/${{ inputs.name }}.tar.zst
SKIP_IF_DOES_NOT_EXIST: ${{ inputs.skip-if-does-not-exist }}
PREFIX: artifacts/${{ inputs.prefix || format('{0}/{1}/{2}', github.event.pull_request.head.sha || github.sha, github.run_id, github.run_attempt) }}
run: |
@@ -61,7 +61,7 @@ runs:
shell: bash -euxo pipefail {0}
env:
TARGET: ${{ inputs.path }}
ARCHIVE: /__w/_temp/downloads/${{ inputs.name }}.tar.zst
ARCHIVE: /tmp/downloads/${{ inputs.name }}.tar.zst
run: |
mkdir -p ${TARGET}
time tar -xf ${ARCHIVE} -C ${TARGET}

View File

@@ -40,11 +40,11 @@ inputs:
psql_path:
description: 'Path to psql binary - it is caller responsibility to provision the psql binary'
required: false
default: '/__w/_temp/neon/pg_install/v16/bin/psql'
default: '/tmp/neon/pg_install/v16/bin/psql'
libpq_lib_path:
description: 'Path to directory containing libpq library - it is caller responsibility to provision the libpq library'
required: false
default: '/__w/_temp/neon/pg_install/v16/lib'
default: '/tmp/neon/pg_install/v16/lib'
project_settings:
description: 'A JSON object with project settings'
required: false
@@ -83,7 +83,7 @@ runs:
\"settings\": ${PROJECT_SETTINGS}
}
}")
code=${res: -3}
if [[ ${code} -ge 400 ]]; then
echo Request failed with error code ${code}
@@ -135,7 +135,7 @@ runs:
-H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer ${ADMIN_API_KEY}" \
-d "{\"scheduling\": \"Essential\"}"
fi
env:
API_HOST: ${{ inputs.api_host }}

View File

@@ -65,7 +65,7 @@ runs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}${{ inputs.sanitizers == 'enabled' && '-sanitized' || '' }}-artifact
path: /__w/_temp/neon
path: /tmp/neon
aws-oidc-role-arn: ${{ inputs.aws-oidc-role-arn }}
- name: Download Neon binaries for the previous release
@@ -73,7 +73,7 @@ runs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
path: /__w/_temp/neon-previous
path: /tmp/neon-previous
prefix: latest
aws-oidc-role-arn: ${{ inputs.aws-oidc-role-arn }}
@@ -82,7 +82,7 @@ runs:
uses: ./.github/actions/download
with:
name: compatibility-snapshot-${{ runner.arch }}-${{ inputs.build_type }}-pg${{ inputs.pg_version }}
path: /__w/_temp/compatibility_snapshot_pg${{ inputs.pg_version }}
path: /tmp/compatibility_snapshot_pg${{ inputs.pg_version }}
prefix: latest
# The lack of compatibility snapshot (for example, for the new Postgres version)
# shouldn't fail the whole job. Only relevant test should fail.
@@ -107,12 +107,12 @@ runs:
- name: Run pytest
env:
NEON_BIN: /__w/_temp/neon/bin
COMPATIBILITY_NEON_BIN: /__w/_temp/neon-previous/bin
COMPATIBILITY_POSTGRES_DISTRIB_DIR: /__w/_temp/neon-previous/pg_install
TEST_OUTPUT: /__w/_temp/test_output
NEON_BIN: /tmp/neon/bin
COMPATIBILITY_NEON_BIN: /tmp/neon-previous/bin
COMPATIBILITY_POSTGRES_DISTRIB_DIR: /tmp/neon-previous/pg_install
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: ${{ inputs.build_type }}
COMPATIBILITY_SNAPSHOT_DIR: /__w/_temp/compatibility_snapshot_pg${{ inputs.pg_version }}
COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg${{ inputs.pg_version }}
RERUN_FAILED: ${{ inputs.rerun_failed }}
PG_VERSION: ${{ inputs.pg_version }}
SANITIZERS: ${{ inputs.sanitizers }}
@@ -121,7 +121,7 @@ runs:
# PLATFORM will be embedded in the perf test report
# and it is needed to distinguish different environments
export PLATFORM=${PLATFORM:-github-actions-selfhosted}
export POSTGRES_DISTRIB_DIR=${POSTGRES_DISTRIB_DIR:-/__w/_temp/neon/pg_install}
export POSTGRES_DISTRIB_DIR=${POSTGRES_DISTRIB_DIR:-/tmp/neon/pg_install}
export DEFAULT_PG_VERSION=${PG_VERSION#v}
export LD_LIBRARY_PATH=${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/lib
export BENCHMARK_CONNSTR=${BENCHMARK_CONNSTR:-}
@@ -176,7 +176,7 @@ runs:
fi
if [[ $BUILD_TYPE == "debug" && $RUNNER_ARCH == 'X64' ]]; then
cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/__w/_temp/coverage run)
cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run)
else
cov_prefix=()
fi
@@ -224,7 +224,7 @@ runs:
with:
name: compatibility-snapshot-${{ runner.arch }}-${{ inputs.build_type }}-pg${{ inputs.pg_version }}
# Directory is created by test_compatibility.py::test_create_snapshot, keep the path in sync with the test
path: /__w/_temp/test_output/compatibility_snapshot_pg${{ inputs.pg_version }}/
path: /tmp/test_output/compatibility_snapshot_pg${{ inputs.pg_version }}/
# The lack of compatibility snapshot shouldn't fail the job
# (for example if we didn't run the test for non build-and-test workflow)
skip-if-does-not-exist: true
@@ -241,6 +241,6 @@ runs:
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-store
with:
report-dir: /__w/_temp/test_output/allure/results
report-dir: /tmp/test_output/allure/results
unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}-${{ runner.arch }}
aws-oidc-role-arn: ${{ inputs.aws-oidc-role-arn }}

View File

@@ -6,13 +6,13 @@ runs:
steps:
- name: Merge coverage data
shell: bash -euxo pipefail {0}
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/__w/_temp/coverage merge
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
- name: Download previous coverage data into the same directory
uses: ./.github/actions/download
with:
name: coverage-data-artifact
path: /__w/_temp/coverage
path: /tmp/coverage
skip-if-does-not-exist: true # skip if there's no previous coverage to download
aws-oidc-role-arn: ${{ inputs.aws-oidc-role-arn }}
@@ -20,5 +20,5 @@ runs:
uses: ./.github/actions/upload
with:
name: coverage-data-artifact
path: /__w/_temp/coverage
path: /tmp/coverage
aws-oidc-role-arn: ${{ inputs.aws-oidc-role-arn }}

View File

@@ -27,7 +27,7 @@ runs:
shell: bash -euxo pipefail {0}
env:
SOURCE: ${{ inputs.path }}
ARCHIVE: /__w/_temp/uploads/${{ inputs.name }}.tar.zst
ARCHIVE: /tmp/uploads/${{ inputs.name }}.tar.zst
SKIP_IF_DOES_NOT_EXIST: ${{ inputs.skip-if-does-not-exist }}
run: |
mkdir -p $(dirname $ARCHIVE)
@@ -69,7 +69,7 @@ runs:
shell: bash -euxo pipefail {0}
env:
SOURCE: ${{ inputs.path }}
ARCHIVE: /__w/_temp/uploads/${{ inputs.name }}.tar.zst
ARCHIVE: /tmp/uploads/${{ inputs.name }}.tar.zst
PREFIX: artifacts/${{ inputs.prefix || format('{0}/{1}/{2}', github.event.pull_request.head.sha || github.sha, github.run_id , github.run_attempt) }}
run: |
BUCKET=neon-github-public-dev

View File

@@ -24,9 +24,9 @@ jobs:
database: [ clickbench, tpch, userexample ]
env:
LD_LIBRARY_PATH: /__w/_temp/neon/pg_install/v16/lib
LD_LIBRARY_PATH: /tmp/neon/pg_install/v16/lib
PLATFORM: ${{ matrix.platform }}
PG_BINARIES: /__w/_temp/neon/pg_install/v16/bin
PG_BINARIES: /tmp/neon/pg_install/v16/bin
runs-on: [ self-hosted, us-east-2, x64 ]
container:
@@ -79,7 +79,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -127,13 +127,13 @@ jobs:
echo "Database ${{ env.DATABASE_NAME }} already exists."
fi
- name: Download dump from S3 to /__w/_temp/dumps
- name: Download dump from S3 to /tmp/dumps
if: steps.check-restore-done.outputs.skip != 'true'
env:
DATABASE_NAME: ${{ matrix.database }}
run: |
mkdir -p /__w/_temp/dumps
aws s3 cp s3://neon-github-dev/performance/pgdumps/$DATABASE_NAME/$DATABASE_NAME.pg_dump /__w/_temp/dumps/
mkdir -p /tmp/dumps
aws s3 cp s3://neon-github-dev/performance/pgdumps/$DATABASE_NAME/$DATABASE_NAME.pg_dump /tmp/dumps/
- name: Replace database name in connection string
if: steps.check-restore-done.outputs.skip != 'true'
@@ -166,7 +166,7 @@ jobs:
# available in RDS, so we will always report an error, but we can ignore it
run: |
${PG_BINARIES}/pg_restore --clean --if-exists --no-owner --jobs=4 \
-d "${DATABASE_CONNSTR}" /__w/_temp/dumps/${DATABASE_NAME}.pg_dump || true
-d "${DATABASE_CONNSTR}" /tmp/dumps/${DATABASE_NAME}.pg_dump || true
- name: Update benchmark_restore_status table
if: steps.check-restore-done.outputs.skip != 'true'

View File

@@ -118,7 +118,7 @@ jobs:
run: |
CARGO_FLAGS="--locked --features testing"
if [[ $BUILD_TYPE == "debug" && $ARCH == 'x64' ]]; then
cov_prefix="scripts/coverage --profraw-prefix=$GITHUB_JOB --dir=/__w/_temp/coverage run"
cov_prefix="scripts/coverage --profraw-prefix=$GITHUB_JOB --dir=/tmp/coverage run"
CARGO_PROFILE=""
elif [[ $BUILD_TYPE == "debug" ]]; then
cov_prefix=""
@@ -209,23 +209,23 @@ jobs:
SANITIZERS: ${{ inputs.sanitizers }}
run: |
# Install target binaries
mkdir -p /__w/_temp/neon/bin/
mkdir -p /tmp/neon/bin/
binaries=$(
${cov_prefix} cargo metadata $CARGO_FLAGS --format-version=1 --no-deps |
jq -r '.packages[].targets[] | select(.kind | index("bin")) | .name'
)
for bin in $binaries; do
SRC=target/$BUILD_TYPE/$bin
DST=/__w/_temp/neon/bin/$bin
DST=/tmp/neon/bin/$bin
cp "$SRC" "$DST"
done
# Install test executables and write list of all binaries (for code coverage)
if [[ $BUILD_TYPE == "debug" && $ARCH == 'x64' && $SANITIZERS != 'enabled' ]]; then
# Keep bloated coverage data files away from the rest of the artifact
mkdir -p /__w/_temp/coverage/
mkdir -p /tmp/coverage/
mkdir -p /__w/_temp/neon/test_bin/
mkdir -p /tmp/neon/test_bin/
test_exe_paths=$(
${cov_prefix} cargo test $CARGO_FLAGS $CARGO_PROFILE --message-format=json --no-run |
@@ -233,16 +233,16 @@ jobs:
)
for bin in $test_exe_paths; do
SRC=$bin
DST=/__w/_temp/neon/test_bin/$(basename $bin)
DST=/tmp/neon/test_bin/$(basename $bin)
# We don't need debug symbols for code coverage, so strip them out to make
# the artifact smaller.
strip "$SRC" -o "$DST"
echo "$DST" >> /__w/_temp/coverage/binaries.list
echo "$DST" >> /tmp/coverage/binaries.list
done
for bin in $binaries; do
echo "/__w/_temp/neon/bin/$bin" >> /__w/_temp/coverage/binaries.list
echo "/tmp/neon/bin/$bin" >> /tmp/coverage/binaries.list
done
fi
@@ -300,24 +300,24 @@ jobs:
build/*/src/test/regress/pg_regress \
build/*/src/test/isolation/isolationtester \
build/*/src/test/isolation/pg_isolation_regress \
| tar x -C /__w/_temp/neon
| tar x -C /tmp/neon
- name: Upload Neon artifact
uses: ./.github/actions/upload
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}${{ inputs.sanitizers == 'enabled' && '-sanitized' || '' }}-artifact
path: /__w/_temp/neon
path: /tmp/neon
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Check diesel schema
if: inputs.build-type == 'release' && inputs.arch == 'x64'
env:
DATABASE_URL: postgresql://localhost:1235/storage_controller
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
run: |
export ASAN_OPTIONS=detect_leaks=0
/__w/_temp/neon/bin/neon_local init
/__w/_temp/neon/bin/neon_local storage_controller start
/tmp/neon/bin/neon_local init
/tmp/neon/bin/neon_local storage_controller start
diesel print-schema > storage_controller/src/schema.rs
@@ -328,7 +328,7 @@ jobs:
exit 1
fi
/__w/_temp/neon/bin/neon_local storage_controller stop
/tmp/neon/bin/neon_local storage_controller stop
# XXX: keep this after the binaries.list is formed, so the coverage can properly work later
- name: Merge and upload coverage data

View File

@@ -119,7 +119,7 @@ jobs:
curl -s -X DELETE "$BASE_URL/projects/$project_id" \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--header "Authorization: Bearer ${API_KEY}"
--header "Authorization: Bearer ${API_KEY}"
done
else
echo "Dry run enabled — no projects were deleted."
@@ -149,9 +149,9 @@ jobs:
env:
TEST_PG_BENCH_DURATIONS_MATRIX: "300"
TEST_PG_BENCH_SCALES_MATRIX: "10,100"
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
PG_VERSION: ${{ matrix.PG_VERSION }}
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
PLATFORM: ${{ matrix.PLATFORM }}
@@ -183,7 +183,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -257,9 +257,9 @@ jobs:
statuses: write
id-token: write # aws-actions/configure-aws-credentials
env:
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 17
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
PLATFORM: "neon-staging"
@@ -291,7 +291,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -317,9 +317,9 @@ jobs:
statuses: write
id-token: write # aws-actions/configure-aws-credentials
env:
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 16
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
PLATFORM: "neon-staging"
@@ -351,7 +351,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -537,9 +537,9 @@ jobs:
env:
TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
TEST_PG_BENCH_SCALES_MATRIX: ${{ matrix.db_size }}
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
PG_VERSION: ${{ matrix.pg_version }}
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
PLATFORM: ${{ matrix.platform }}
@@ -574,7 +574,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -728,9 +728,9 @@ jobs:
env:
TEST_PG_BENCH_DURATIONS_MATRIX: "15m"
TEST_PG_BENCH_SCALES_MATRIX: "1"
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
PG_VERSION: ${{ matrix.postgres_version }}
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
@@ -763,7 +763,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -857,9 +857,9 @@ jobs:
matrix: ${{ fromJSON(needs.generate-matrices.outputs.olap-compare-matrix) }}
env:
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
PG_VERSION: ${{ matrix.pg_version }}
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
TEST_OLAP_COLLECT_EXPLAIN: ${{ github.event.inputs.collect_olap_explain }}
TEST_OLAP_COLLECT_PG_STAT_STATEMENTS: ${{ github.event.inputs.collect_pg_stat_statements }}
BUILD_TYPE: remote
@@ -897,7 +897,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -989,9 +989,9 @@ jobs:
matrix: ${{ fromJSON(needs.generate-matrices.outputs.tpch-compare-matrix) }}
env:
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
PG_VERSION: ${{ matrix.pg_version }}
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
PLATFORM: ${{ matrix.platform }}
@@ -1023,7 +1023,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -1113,9 +1113,9 @@ jobs:
matrix: ${{ fromJSON(needs.generate-matrices.outputs.olap-compare-matrix) }}
env:
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
PG_VERSION: ${{ matrix.pg_version }}
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
PLATFORM: ${{ matrix.platform }}
@@ -1147,7 +1147,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

View File

@@ -294,8 +294,8 @@ jobs:
run: |
poetry run ./scripts/benchmark_durations.py "${TEST_RESULT_CONNSTR}" \
--days 10 \
--output /__w/_temp/benchmark_durations.json
echo "json=$(jq --compact-output '.' /__w/_temp/benchmark_durations.json)" >> $GITHUB_OUTPUT
--output /tmp/benchmark_durations.json
echo "json=$(jq --compact-output '.' /tmp/benchmark_durations.json)" >> $GITHUB_OUTPUT
benchmarks:
# `!failure() && !cancelled()` is required because the workflow depends on the job that can be skipped: `deploy` in PRs
@@ -471,32 +471,32 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact
path: /__w/_temp/neon
path: /tmp/neon
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Get coverage artifact
uses: ./.github/actions/download
with:
name: coverage-data-artifact
path: /__w/_temp/coverage
path: /tmp/coverage
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Merge coverage data
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/__w/_temp/coverage merge
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
- name: Build coverage report
env:
COMMIT_URL: ${{ github.server_url }}/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha || github.sha }}
run: |
scripts/coverage --dir=/__w/_temp/coverage \
scripts/coverage --dir=/tmp/coverage \
report \
--input-objects=/__w/_temp/coverage/binaries.list \
--input-objects=/tmp/coverage/binaries.list \
--commit-url=${COMMIT_URL} \
--format=github
scripts/coverage --dir=/__w/_temp/coverage \
scripts/coverage --dir=/tmp/coverage \
report \
--input-objects=/__w/_temp/coverage/binaries.list \
--input-objects=/tmp/coverage/binaries.list \
--format=lcov
- name: Build coverage report NEW
@@ -511,7 +511,7 @@ jobs:
CURRENT="${COMMIT_SHA}"
BASELINE="$(git merge-base $BASE_SHA $CURRENT)"
cp /__w/_temp/coverage/report/lcov.info ./${CURRENT}.info
cp /tmp/coverage/report/lcov.info ./${CURRENT}.info
GENHTML_ARGS="--ignore-errors path,unmapped,empty --synthesize-missing --demangle-cpp rustfilt --output-directory lcov-html ${CURRENT}.info"

View File

@@ -27,8 +27,8 @@ permissions:
jobs:
regress:
env:
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
TEST_OUTPUT: /__w/_temp/test_output
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
strategy:
fail-fast: false
@@ -63,7 +63,7 @@ jobs:
LIBS=timescaledb:rag_bge_small_en_v15,rag_jina_reranker_v1_tiny_en:$ULID
settings=$(jq -c -n --arg libs $LIBS '{preload_libraries:{use_defaults:false,enabled_libraries:($libs| split(":"))}}')
echo settings=$settings >> $GITHUB_OUTPUT
- name: Create Neon Project
id: create-neon-project
uses: ./.github/actions/neon-project-create

View File

@@ -27,8 +27,8 @@ permissions:
jobs:
regress:
env:
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
TEST_OUTPUT: /__w/_temp/test_output
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
strategy:
fail-fast: false
@@ -87,7 +87,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

View File

@@ -63,9 +63,9 @@ jobs:
statuses: write
id-token: write # aws-actions/configure-aws-credentials
env:
PG_CONFIG: /__w/_temp/neon/pg_install/v16/bin/pg_config
PSQL: /__w/_temp/neon/pg_install/v16/bin/psql
PG_16_LIB_PATH: /__w/_temp/neon/pg_install/v16/lib
PG_CONFIG: /tmp/neon/pg_install/v16/bin/pg_config
PSQL: /tmp/neon/pg_install/v16/bin/psql
PG_16_LIB_PATH: /tmp/neon/pg_install/v16/lib
PGCOPYDB: /pgcopydb/bin/pgcopydb
PGCOPYDB_LIB_PATH: /pgcopydb/lib
runs-on: [ self-hosted, us-east-2, x64 ]
@@ -96,7 +96,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

View File

@@ -53,9 +53,9 @@ jobs:
env:
TEST_PG_BENCH_DURATIONS_MATRIX: "1h" # todo update to > 1 h
TEST_PGBENCH_CUSTOM_SCRIPTS: ${{ matrix.custom_scripts }}
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
PG_VERSION: 16 # pre-determined by pre-determined project
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
PLATFORM: ${{ matrix.target }}
@@ -91,7 +91,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -128,9 +128,9 @@ jobs:
if: ${{ matrix.target == 'reuse_branch' }}
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr_without_pooler }}
PG_CONFIG: /__w/_temp/neon/pg_install/v16/bin/pg_config
PSQL: /__w/_temp/neon/pg_install/v16/bin/psql
PG_16_LIB_PATH: /__w/_temp/neon/pg_install/v16/lib
PG_CONFIG: /tmp/neon/pg_install/v16/bin/pg_config
PSQL: /tmp/neon/pg_install/v16/bin/psql
PG_16_LIB_PATH: /tmp/neon/pg_install/v16/lib
run: |
echo "$(date '+%Y-%m-%d %H:%M:%S') - Deleting rows in table webhook.incoming_webhooks from prior runs"
export LD_LIBRARY_PATH=${PG_16_LIB_PATH}

View File

@@ -85,9 +85,9 @@ jobs:
env:
TEST_PG_BENCH_DURATIONS_MATRIX: "1h"
TEST_PGBENCH_CUSTOM_SCRIPTS: ${{ join(matrix.custom_scripts, ' ') }}
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
PG_VERSION: 16 # pre-determined by pre-determined project
TEST_OUTPUT: /__w/_temp/test_output
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
PLATFORM: ${{ matrix.target }}
@@ -118,7 +118,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

View File

@@ -101,7 +101,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
@@ -176,7 +176,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

View File

@@ -35,7 +35,7 @@ env:
jobs:
run-random-rests:
env:
POSTGRES_DISTRIB_DIR: /__w/_temp/neon/pg_install
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
runs-on: small
permissions:
id-token: write
@@ -64,7 +64,7 @@ jobs:
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /__w/_temp/neon/
path: /tmp/neon/
prefix: latest
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

View File

@@ -100,8 +100,8 @@ ARG BULLSEYE_SLIM_SHA=sha256:e831d9a884d63734fe3dd9c491ed9a5a3d4c6a6d32c5b14f206
# If var will match one the known images, we will replace it with the known sha.
# If no match, then the value will be unaffected, and the build will proceed with a non-pinned image.
ARG BASE_IMAGE_SHA=debian:${DEBIAN_FLAVOR}
ARG BASE_IMAGE_SHA=${BASE_IMAGE_SHA/debian:bookworm-slim/debian@$BOOKWORM_SLIM_SHA}
ARG BASE_IMAGE_SHA=${BASE_IMAGE_SHA/debian:bullseye-slim/debian@$BULLSEYE_SLIM_SHA}
ARG BASE_IMAGE_SHA=${BASE_IMAGE_SHA/debian:-bookworm-slim/debian@$BOOKWORM_SLIM_SHA}
ARG BASE_IMAGE_SHA=${BASE_IMAGE_SHA/debian:-bullseye-slim/debian@$BULLSEYE_SLIM_SHA}
# By default, build all PostgreSQL extensions. For quick local testing when you don't
# care about the extensions, pass EXTENSIONS=none or EXTENSIONS=minimal

View File

@@ -211,8 +211,6 @@ pub struct NeonStorageControllerConf {
pub use_local_compute_notifications: bool,
pub timeline_safekeeper_count: Option<i64>,
pub kick_secondary_downloads: Option<bool>,
}
impl NeonStorageControllerConf {
@@ -244,7 +242,6 @@ impl Default for NeonStorageControllerConf {
use_https_safekeeper_api: false,
use_local_compute_notifications: true,
timeline_safekeeper_count: None,
kick_secondary_downloads: None,
}
}
}

View File

@@ -557,10 +557,6 @@ impl StorageController {
args.push("--use-local-compute-notifications".to_string());
}
if let Some(value) = self.config.kick_secondary_downloads {
args.push(format!("--kick-secondary-downloads={value}"));
}
if let Some(ssl_ca_file) = self.env.ssl_ca_cert_path() {
args.push(format!("--ssl-ca-file={}", ssl_ca_file.to_str().unwrap()));
}

View File

@@ -5,9 +5,6 @@ use std::time::Duration;
use anyhow::{Context, anyhow};
use camino::Utf8PathBuf;
#[cfg(feature = "testing")]
use clap::ArgAction;
use clap::Parser;
use futures::future::OptionFuture;
use http_utils::tls_certs::ReloadingCertificateResolver;
@@ -216,13 +213,6 @@ struct Cli {
/// This option exists primarily for testing purposes.
#[arg(long, default_value = "3", value_parser = clap::value_parser!(i64).range(1..))]
timeline_safekeeper_count: i64,
/// When set, actively checks and initiates heatmap downloads/uploads during reconciliation.
/// This speed up migrations by avoiding the default wait for the heatmap download interval.
/// Primarily useful for testing to reduce test execution time.
#[cfg(feature = "testing")]
#[arg(long, default_value = "true", action=ArgAction::Set)]
kick_secondary_downloads: bool,
}
enum StrictMode {
@@ -455,8 +445,6 @@ async fn async_main() -> anyhow::Result<()> {
timelines_onto_safekeepers: args.timelines_onto_safekeepers,
use_local_compute_notifications: args.use_local_compute_notifications,
timeline_safekeeper_count: args.timeline_safekeeper_count,
#[cfg(feature = "testing")]
kick_secondary_downloads: args.kick_secondary_downloads,
};
// Validate that we can connect to the database

View File

@@ -856,7 +856,6 @@ impl Reconciler {
&self.shard,
&self.config,
&self.placement_policy,
self.intent.secondary.len(),
);
match self.observed.locations.get(&node.get_id()) {
Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {
@@ -1236,11 +1235,11 @@ pub(crate) fn attached_location_conf(
shard: &ShardIdentity,
config: &TenantConfig,
policy: &PlacementPolicy,
secondary_count: usize,
) -> LocationConfig {
let has_secondaries = match policy {
PlacementPolicy::Detached | PlacementPolicy::Secondary => false,
PlacementPolicy::Attached(0) => secondary_count > 0,
PlacementPolicy::Attached(0) | PlacementPolicy::Detached | PlacementPolicy::Secondary => {
false
}
PlacementPolicy::Attached(_) => true,
};

View File

@@ -470,9 +470,6 @@ pub struct Config {
/// Number of safekeepers to choose for a timeline when creating it.
/// Safekeepers will be chosen from different availability zones.
pub timeline_safekeeper_count: i64,
#[cfg(feature = "testing")]
pub kick_secondary_downloads: bool,
}
impl From<DatabaseError> for ApiError {
@@ -2067,7 +2064,6 @@ impl Service {
&tenant_shard.shard,
&tenant_shard.config,
&PlacementPolicy::Attached(0),
tenant_shard.intent.get_secondary().len(),
)),
},
)]);
@@ -5609,15 +5605,7 @@ impl Service {
for parent_id in parent_ids {
let child_ids = parent_id.split(new_shard_count);
let (
pageserver,
generation,
policy,
parent_ident,
config,
preferred_az,
secondary_count,
) = {
let (pageserver, generation, policy, parent_ident, config, preferred_az) = {
let mut old_state = tenants
.remove(&parent_id)
.expect("It was present, we just split it");
@@ -5637,7 +5625,6 @@ impl Service {
old_state.shard,
old_state.config.clone(),
old_state.preferred_az().cloned(),
old_state.intent.get_secondary().len(),
)
};
@@ -5659,7 +5646,6 @@ impl Service {
&child_shard,
&config,
&policy,
secondary_count,
)),
},
);
@@ -8387,11 +8373,6 @@ impl Service {
/// we have this helper to move things along faster.
#[cfg(feature = "testing")]
async fn kick_secondary_download(&self, tenant_shard_id: TenantShardId) {
if !self.config.kick_secondary_downloads {
// No-op if kick_secondary_downloads functionality is not configured
return;
}
let (attached_node, secondaries) = {
let locked = self.inner.read().unwrap();
let Some(shard) = locked.tenants.get(&tenant_shard_id) else {

View File

@@ -1381,13 +1381,8 @@ impl TenantShard {
.generation
.expect("Attempted to enter attached state without a generation");
let wanted_conf = attached_location_conf(
generation,
&self.shard,
&self.config,
&self.policy,
self.intent.get_secondary().len(),
);
let wanted_conf =
attached_location_conf(generation, &self.shard, &self.config, &self.policy);
match self.observed.locations.get(&node_id) {
Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {}
Some(_) | None => {

View File

@@ -453,7 +453,6 @@ class NeonEnvBuilder:
pageserver_get_vectored_concurrent_io: str | None = None,
pageserver_tracing_config: PageserverTracingConfig | None = None,
pageserver_import_config: PageserverImportConfig | None = None,
storcon_kick_secondary_downloads: bool | None = None,
):
self.repo_dir = repo_dir
self.rust_log_override = rust_log_override
@@ -515,8 +514,6 @@ class NeonEnvBuilder:
self.pageserver_tracing_config = pageserver_tracing_config
self.pageserver_import_config = pageserver_import_config
self.storcon_kick_secondary_downloads = storcon_kick_secondary_downloads
self.pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None = (
pageserver_default_tenant_config_compaction_algorithm
)
@@ -1224,14 +1221,6 @@ class NeonEnv:
else:
cfg["storage_controller"] = {"use_local_compute_notifications": False}
if config.storcon_kick_secondary_downloads is not None:
# Configure whether storage controller should actively kick off secondary downloads
if "storage_controller" not in cfg:
cfg["storage_controller"] = {}
cfg["storage_controller"]["kick_secondary_downloads"] = (
config.storcon_kick_secondary_downloads
)
# Create config for pageserver
http_auth_type = "NeonJWT" if config.auth_enabled else "Trust"
pg_auth_type = "NeonJWT" if config.auth_enabled else "Trust"

View File

@@ -4434,53 +4434,6 @@ def test_storage_controller_graceful_migration(neon_env_builder: NeonEnvBuilder,
assert initial_ps.http_client().tenant_list_locations()["tenant_shards"] == []
def test_attached_0_graceful_migration(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_pageservers = 4
neon_env_builder.num_azs = 2
neon_env_builder.storcon_kick_secondary_downloads = False
env = neon_env_builder.init_start()
# It is default, but we want to ensure that there are no secondary locations requested
env.storage_controller.tenant_policy_update(env.initial_tenant, {"placement": {"Attached": 0}})
env.storage_controller.reconcile_until_idle()
desc = env.storage_controller.tenant_describe(env.initial_tenant)["shards"][0]
src_ps_id = desc["node_attached"]
src_ps = env.get_pageserver(src_ps_id)
src_az = desc["preferred_az_id"]
# There must be no secondary locations with Attached(0) placement policy
assert len(desc["node_secondary"]) == 0
# Migrate tenant shard to the same AZ node
dst_ps = [ps for ps in env.pageservers if ps.id != src_ps_id and ps.az_id == src_az][0]
env.storage_controller.tenant_shard_migrate(
TenantShardId(env.initial_tenant, 0, 0),
dst_ps.id,
config=StorageControllerMigrationConfig(prewarm=True),
)
def tenant_shard_migrated():
src_locations = src_ps.http_client().tenant_list_locations()["tenant_shards"]
assert len(src_locations) == 0
log.info(f"Tenant shard migrated from {src_ps.id}")
dst_locations = dst_ps.http_client().tenant_list_locations()["tenant_shards"]
assert len(dst_locations) == 1
assert dst_locations[0][1]["mode"] == "AttachedSingle"
log.info(f"Tenant shard migrated to {dst_ps.id}")
# After all we expect that tenant shard exists only on dst node.
# We wait so long because [`DEFAULT_HEATMAP_PERIOD`] and [`DEFAULT_DOWNLOAD_INTERVAL`]
# are set to 60 seconds by default.
#
# TODO: we should consider making these configurable, so the test can run faster.
wait_until(tenant_shard_migrated, timeout=180, interval=5, status_interval=10)
log.info("Tenant shard migrated successfully")
@run_only_on_default_postgres("this is like a 'unit test' against storcon db")
def test_storage_controller_migrate_with_pageserver_restart(
neon_env_builder: NeonEnvBuilder, make_httpserver