mirror of
https://github.com/neondatabase/neon.git
synced 2026-02-02 18:20:37 +00:00
Compare commits
9 Commits
layer_map_
...
partial_im
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
10b90506a0 | ||
|
|
5ee4524caa | ||
|
|
c5245a9e4f | ||
|
|
9f10195d7b | ||
|
|
51aa53ab90 | ||
|
|
2359106a9d | ||
|
|
885033ad42 | ||
|
|
487ec20085 | ||
|
|
898937d500 |
@@ -4,7 +4,7 @@
|
|||||||
hakari-package = "workspace_hack"
|
hakari-package = "workspace_hack"
|
||||||
|
|
||||||
# Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above.
|
# Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above.
|
||||||
dep-format-version = "3"
|
dep-format-version = "2"
|
||||||
|
|
||||||
# Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended.
|
# Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended.
|
||||||
# Hakari works much better with the new feature resolver.
|
# Hakari works much better with the new feature resolver.
|
||||||
|
|||||||
@@ -14,8 +14,6 @@
|
|||||||
!pgxn/
|
!pgxn/
|
||||||
!proxy/
|
!proxy/
|
||||||
!safekeeper/
|
!safekeeper/
|
||||||
!storage_broker/
|
|
||||||
!trace/
|
|
||||||
!vendor/postgres-v14/
|
!vendor/postgres-v14/
|
||||||
!vendor/postgres-v15/
|
!vendor/postgres-v15/
|
||||||
!workspace_hack/
|
!workspace_hack/
|
||||||
|
|||||||
6
.github/PULL_REQUEST_TEMPLATE/release-pr.md
vendored
6
.github/PULL_REQUEST_TEMPLATE/release-pr.md
vendored
@@ -10,11 +10,11 @@
|
|||||||
<!-- List everything that should be done **before** release, any issues / setting changes / etc -->
|
<!-- List everything that should be done **before** release, any issues / setting changes / etc -->
|
||||||
|
|
||||||
### Checklist after release
|
### Checklist after release
|
||||||
- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/219/files))
|
- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/120/files))
|
||||||
- [ ] Check [#dev-production-stream](https://neondb.slack.com/archives/C03F5SM1N02) Slack channel
|
- [ ] Check [#dev-production-stream](https://neondb.slack.com/archives/C03F5SM1N02) Slack channel
|
||||||
- [ ] Check [stuck projects page](https://console.neon.tech/admin/projects?sort=last_active&order=desc&stuck=true)
|
- [ ] Check [stuck projects page](https://console.neon.tech/admin/projects?sort=last_active&order=desc&stuck=true)
|
||||||
- [ ] Check [recent operation failures](https://console.neon.tech/admin/operations?action=create_timeline%2Cstart_compute%2Cstop_compute%2Csuspend_compute%2Capply_config%2Cdelete_timeline%2Cdelete_tenant%2Ccreate_branch%2Ccheck_availability&sort=updated_at&order=desc&had_retries=some)
|
- [ ] Check [recent operation failures](https://console.neon.tech/admin/operations?action=create_timeline%2Cstart_compute%2Cstop_compute%2Csuspend_compute%2Capply_config%2Cdelete_timeline%2Cdelete_tenant%2Ccreate_branch%2Ccheck_availability&sort=updated_at&order=desc&had_retries=some)
|
||||||
- [ ] Check [cloud SLO dashboard](https://neonprod.grafana.net/d/_oWcBMJ7k/cloud-slos?orgId=1)
|
- [ ] Check [cloud SLO dashboard](https://observer.zenith.tech/d/_oWcBMJ7k/cloud-slos?orgId=1)
|
||||||
- [ ] Check [compute startup metrics dashboard](https://neonprod.grafana.net/d/5OkYJEmVz/compute-startup-time)
|
- [ ] Check [compute startup metrics dashboard](https://observer.zenith.tech/d/5OkYJEmVz/compute-startup-time)
|
||||||
|
|
||||||
<!-- List everything that should be done **after** release, any admin UI configuration / Grafana dashboard / alert changes / setting changes / etc -->
|
<!-- List everything that should be done **after** release, any admin UI configuration / Grafana dashboard / alert changes / setting changes / etc -->
|
||||||
|
|||||||
43
.github/actions/allure-report/action.yml
vendored
43
.github/actions/allure-report/action.yml
vendored
@@ -32,8 +32,8 @@ runs:
|
|||||||
exit 2
|
exit 2
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Calculate variables
|
- name: Calculate key
|
||||||
id: calculate-vars
|
id: calculate-key
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
run: |
|
run: |
|
||||||
# TODO: for manually triggered workflows (via workflow_dispatch) we need to have a separate key
|
# TODO: for manually triggered workflows (via workflow_dispatch) we need to have a separate key
|
||||||
@@ -41,21 +41,13 @@ runs:
|
|||||||
pr_number=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
|
pr_number=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
|
||||||
if [ "${pr_number}" != "null" ]; then
|
if [ "${pr_number}" != "null" ]; then
|
||||||
key=pr-${pr_number}
|
key=pr-${pr_number}
|
||||||
elif [ "${GITHUB_REF_NAME}" = "main" ]; then
|
elif [ "${GITHUB_REF}" = "refs/heads/main" ]; then
|
||||||
# Shortcut for a special branch
|
# Shortcut for a special branch
|
||||||
key=main
|
key=main
|
||||||
elif [ "${GITHUB_REF_NAME}" = "release" ]; then
|
|
||||||
# Shortcut for a special branch
|
|
||||||
key=release
|
|
||||||
else
|
else
|
||||||
key=branch-$(printf "${GITHUB_REF_NAME}" | tr -c "[:alnum:]._-" "-")
|
key=branch-$(echo ${GITHUB_REF#refs/heads/} | tr -c "[:alnum:]._-" "-")
|
||||||
fi
|
fi
|
||||||
echo "KEY=${key}" >> $GITHUB_OUTPUT
|
echo "::set-output name=KEY::${key}"
|
||||||
|
|
||||||
# Sanitize test selection to remove `/` and any other special characters
|
|
||||||
# Use printf instead of echo to avoid having `\n` at the end of the string
|
|
||||||
test_selection=$(printf "${{ inputs.test_selection }}" | tr -c "[:alnum:]._-" "-" )
|
|
||||||
echo "TEST_SELECTION=${test_selection}" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- uses: actions/setup-java@v3
|
- uses: actions/setup-java@v3
|
||||||
if: ${{ inputs.action == 'generate' }}
|
if: ${{ inputs.action == 'generate' }}
|
||||||
@@ -82,11 +74,10 @@ runs:
|
|||||||
- name: Upload Allure results
|
- name: Upload Allure results
|
||||||
if: ${{ inputs.action == 'store' }}
|
if: ${{ inputs.action == 'store' }}
|
||||||
env:
|
env:
|
||||||
REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
|
REPORT_PREFIX: reports/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
|
||||||
RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
|
RAW_PREFIX: reports-raw/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUCKET: neon-github-public-dev
|
BUCKET: neon-github-public-dev
|
||||||
TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
|
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
run: |
|
run: |
|
||||||
# Add metadata
|
# Add metadata
|
||||||
@@ -107,7 +98,7 @@ runs:
|
|||||||
BUILD_TYPE=${{ inputs.build_type }}
|
BUILD_TYPE=${{ inputs.build_type }}
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
ARCHIVE="${GITHUB_RUN_ID}-${TEST_SELECTION}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
|
ARCHIVE="${GITHUB_RUN_ID}-${{ inputs.test_selection }}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
|
||||||
ZSTD_NBTHREADS=0
|
ZSTD_NBTHREADS=0
|
||||||
|
|
||||||
tar -C ${TEST_OUTPUT}/allure/results -cf ${ARCHIVE} --zstd .
|
tar -C ${TEST_OUTPUT}/allure/results -cf ${ARCHIVE} --zstd .
|
||||||
@@ -118,9 +109,8 @@ runs:
|
|||||||
if: ${{ inputs.action == 'generate' }}
|
if: ${{ inputs.action == 'generate' }}
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
env:
|
env:
|
||||||
LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
|
LOCK_FILE: reports/${{ steps.calculate-key.outputs.KEY }}/lock.txt
|
||||||
BUCKET: neon-github-public-dev
|
BUCKET: neon-github-public-dev
|
||||||
TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
|
|
||||||
run: |
|
run: |
|
||||||
LOCK_TIMEOUT=300 # seconds
|
LOCK_TIMEOUT=300 # seconds
|
||||||
|
|
||||||
@@ -133,12 +123,12 @@ runs:
|
|||||||
fi
|
fi
|
||||||
sleep 1
|
sleep 1
|
||||||
done
|
done
|
||||||
echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" > lock.txt
|
echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" > lock.txt
|
||||||
aws s3 mv --only-show-errors lock.txt "s3://${BUCKET}/${LOCK_FILE}"
|
aws s3 mv --only-show-errors lock.txt "s3://${BUCKET}/${LOCK_FILE}"
|
||||||
|
|
||||||
# A double-check that exactly WE have acquired the lock
|
# A double-check that exactly WE have acquired the lock
|
||||||
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
|
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
|
||||||
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
|
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" ]; then
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
@@ -147,8 +137,8 @@ runs:
|
|||||||
if: ${{ inputs.action == 'generate' }}
|
if: ${{ inputs.action == 'generate' }}
|
||||||
id: generate-report
|
id: generate-report
|
||||||
env:
|
env:
|
||||||
REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
|
REPORT_PREFIX: reports/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
|
||||||
RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
|
RAW_PREFIX: reports-raw/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUCKET: neon-github-public-dev
|
BUCKET: neon-github-public-dev
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
@@ -196,19 +186,18 @@ runs:
|
|||||||
aws s3 cp --only-show-errors ./index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"
|
aws s3 cp --only-show-errors ./index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"
|
||||||
|
|
||||||
echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}
|
echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}
|
||||||
echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
|
echo "::set-output name=report-url::${REPORT_URL}"
|
||||||
|
|
||||||
- name: Release Allure lock
|
- name: Release Allure lock
|
||||||
if: ${{ inputs.action == 'generate' && always() }}
|
if: ${{ inputs.action == 'generate' && always() }}
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
env:
|
env:
|
||||||
LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
|
LOCK_FILE: reports/${{ steps.calculate-key.outputs.KEY }}/lock.txt
|
||||||
BUCKET: neon-github-public-dev
|
BUCKET: neon-github-public-dev
|
||||||
TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
|
|
||||||
run: |
|
run: |
|
||||||
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0
|
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0
|
||||||
|
|
||||||
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
|
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" ]; then
|
||||||
aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
|
aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|||||||
4
.github/actions/download/action.yml
vendored
4
.github/actions/download/action.yml
vendored
@@ -34,7 +34,7 @@ runs:
|
|||||||
S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX%$GITHUB_RUN_ATTEMPT} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
|
S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX%$GITHUB_RUN_ATTEMPT} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
|
||||||
if [ -z "${S3_KEY}" ]; then
|
if [ -z "${S3_KEY}" ]; then
|
||||||
if [ "${SKIP_IF_DOES_NOT_EXIST}" = "true" ]; then
|
if [ "${SKIP_IF_DOES_NOT_EXIST}" = "true" ]; then
|
||||||
echo 'SKIPPED=true' >> $GITHUB_OUTPUT
|
echo '::set-output name=SKIPPED::true'
|
||||||
exit 0
|
exit 0
|
||||||
else
|
else
|
||||||
echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
|
echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
|
||||||
@@ -42,7 +42,7 @@ runs:
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo 'SKIPPED=false' >> $GITHUB_OUTPUT
|
echo '::set-output name=SKIPPED::false'
|
||||||
|
|
||||||
mkdir -p $(dirname $ARCHIVE)
|
mkdir -p $(dirname $ARCHIVE)
|
||||||
time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} ${ARCHIVE}
|
time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} ${ARCHIVE}
|
||||||
|
|||||||
138
.github/actions/neon-branch-create/action.yml
vendored
138
.github/actions/neon-branch-create/action.yml
vendored
@@ -1,138 +0,0 @@
|
|||||||
name: 'Create Branch'
|
|
||||||
description: 'Create Branch using API'
|
|
||||||
|
|
||||||
inputs:
|
|
||||||
api_key:
|
|
||||||
desctiption: 'Neon API key'
|
|
||||||
required: true
|
|
||||||
project_id:
|
|
||||||
desctiption: 'ID of the Project to create Branch in'
|
|
||||||
required: true
|
|
||||||
api_host:
|
|
||||||
desctiption: 'Neon API host'
|
|
||||||
default: console.stage.neon.tech
|
|
||||||
outputs:
|
|
||||||
dsn:
|
|
||||||
description: 'Created Branch DSN (for main database)'
|
|
||||||
value: ${{ steps.change-password.outputs.dsn }}
|
|
||||||
branch_id:
|
|
||||||
description: 'Created Branch ID'
|
|
||||||
value: ${{ steps.create-branch.outputs.branch_id }}
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: "composite"
|
|
||||||
steps:
|
|
||||||
- name: Create New Branch
|
|
||||||
id: create-branch
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
run: |
|
|
||||||
for i in $(seq 1 10); do
|
|
||||||
branch=$(curl \
|
|
||||||
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches" \
|
|
||||||
--header "Accept: application/json" \
|
|
||||||
--header "Content-Type: application/json" \
|
|
||||||
--header "Authorization: Bearer ${API_KEY}" \
|
|
||||||
--data "{
|
|
||||||
\"branch\": {
|
|
||||||
\"name\": \"Created by actions/neon-branch-create; GITHUB_RUN_ID=${GITHUB_RUN_ID} at $(date +%s)\"
|
|
||||||
},
|
|
||||||
\"endpoints\": [
|
|
||||||
{
|
|
||||||
\"type\": \"read_write\"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}")
|
|
||||||
|
|
||||||
if [ -z "${branch}" ]; then
|
|
||||||
sleep 1
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
branch_id=$(echo $branch | jq --raw-output '.branch.id')
|
|
||||||
if [ "${branch_id}" == "null" ]; then
|
|
||||||
sleep 1
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
break
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
|
|
||||||
echo 2>&1 "Failed to create branch after 10 attempts, the latest response was: ${branch}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
branch_id=$(echo $branch | jq --raw-output '.branch.id')
|
|
||||||
echo "branch_id=${branch_id}" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
host=$(echo $branch | jq --raw-output '.endpoints[0].host')
|
|
||||||
echo "host=${host}" >> $GITHUB_OUTPUT
|
|
||||||
env:
|
|
||||||
API_HOST: ${{ inputs.api_host }}
|
|
||||||
API_KEY: ${{ inputs.api_key }}
|
|
||||||
PROJECT_ID: ${{ inputs.project_id }}
|
|
||||||
|
|
||||||
- name: Get Role name
|
|
||||||
id: role-name
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
run: |
|
|
||||||
roles=$(curl \
|
|
||||||
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches/${BRANCH_ID}/roles" \
|
|
||||||
--fail \
|
|
||||||
--header "Accept: application/json" \
|
|
||||||
--header "Content-Type: application/json" \
|
|
||||||
--header "Authorization: Bearer ${API_KEY}"
|
|
||||||
)
|
|
||||||
|
|
||||||
role_name=$(echo $roles | jq --raw-output '.roles[] | select(.protected == false) | .name')
|
|
||||||
echo "role_name=${role_name}" >> $GITHUB_OUTPUT
|
|
||||||
env:
|
|
||||||
API_HOST: ${{ inputs.api_host }}
|
|
||||||
API_KEY: ${{ inputs.api_key }}
|
|
||||||
PROJECT_ID: ${{ inputs.project_id }}
|
|
||||||
BRANCH_ID: ${{ steps.create-branch.outputs.branch_id }}
|
|
||||||
|
|
||||||
- name: Change Password
|
|
||||||
id: change-password
|
|
||||||
# A shell without `set -x` to not to expose password/dsn in logs
|
|
||||||
shell: bash -euo pipefail {0}
|
|
||||||
run: |
|
|
||||||
for i in $(seq 1 10); do
|
|
||||||
reset_password=$(curl \
|
|
||||||
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches/${BRANCH_ID}/roles/${ROLE_NAME}/reset_password" \
|
|
||||||
--request POST \
|
|
||||||
--header "Accept: application/json" \
|
|
||||||
--header "Content-Type: application/json" \
|
|
||||||
--header "Authorization: Bearer ${API_KEY}"
|
|
||||||
)
|
|
||||||
|
|
||||||
if [ -z "${reset_password}" ]; then
|
|
||||||
sleep 1
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
password=$(echo $reset_password | jq --raw-output '.role.password')
|
|
||||||
if [ "${password}" == "null" ]; then
|
|
||||||
sleep 1
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "::add-mask::${password}"
|
|
||||||
break
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -z "${password}" ] || [ "${password}" == "null" ]; then
|
|
||||||
echo 2>&1 "Failed to reset password after 10 attempts, the latest response was: ${reset_password}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
dsn="postgres://${ROLE_NAME}:${password}@${HOST}/neondb"
|
|
||||||
echo "::add-mask::${dsn}"
|
|
||||||
echo "dsn=${dsn}" >> $GITHUB_OUTPUT
|
|
||||||
env:
|
|
||||||
API_HOST: ${{ inputs.api_host }}
|
|
||||||
API_KEY: ${{ inputs.api_key }}
|
|
||||||
PROJECT_ID: ${{ inputs.project_id }}
|
|
||||||
BRANCH_ID: ${{ steps.create-branch.outputs.branch_id }}
|
|
||||||
ROLE_NAME: ${{ steps.role-name.outputs.role_name }}
|
|
||||||
HOST: ${{ steps.create-branch.outputs.host }}
|
|
||||||
58
.github/actions/neon-branch-delete/action.yml
vendored
58
.github/actions/neon-branch-delete/action.yml
vendored
@@ -1,58 +0,0 @@
|
|||||||
name: 'Delete Branch'
|
|
||||||
description: 'Delete Branch using API'
|
|
||||||
|
|
||||||
inputs:
|
|
||||||
api_key:
|
|
||||||
desctiption: 'Neon API key'
|
|
||||||
required: true
|
|
||||||
project_id:
|
|
||||||
desctiption: 'ID of the Project which should be deleted'
|
|
||||||
required: true
|
|
||||||
branch_id:
|
|
||||||
desctiption: 'ID of the branch to delete'
|
|
||||||
required: true
|
|
||||||
api_host:
|
|
||||||
desctiption: 'Neon API host'
|
|
||||||
default: console.stage.neon.tech
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: "composite"
|
|
||||||
steps:
|
|
||||||
- name: Delete Branch
|
|
||||||
# Do not try to delete a branch if .github/actions/neon-project-create
|
|
||||||
# or .github/actions/neon-branch-create failed before
|
|
||||||
if: ${{ inputs.project_id != '' && inputs.branch_id != '' }}
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
run: |
|
|
||||||
for i in $(seq 1 10); do
|
|
||||||
deleted_branch=$(curl \
|
|
||||||
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches/${BRANCH_ID}" \
|
|
||||||
--request DELETE \
|
|
||||||
--header "Accept: application/json" \
|
|
||||||
--header "Content-Type: application/json" \
|
|
||||||
--header "Authorization: Bearer ${API_KEY}"
|
|
||||||
)
|
|
||||||
|
|
||||||
if [ -z "${deleted_branch}" ]; then
|
|
||||||
sleep 1
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
branch_id=$(echo $deleted_branch | jq --raw-output '.branch.id')
|
|
||||||
if [ "${branch_id}" == "null" ]; then
|
|
||||||
sleep 1
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
break
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
|
|
||||||
echo 2>&1 "Failed to delete branch after 10 attempts, the latest response was: ${deleted_branch}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
env:
|
|
||||||
API_HOST: ${{ inputs.api_host }}
|
|
||||||
API_KEY: ${{ inputs.api_key }}
|
|
||||||
PROJECT_ID: ${{ inputs.project_id }}
|
|
||||||
BRANCH_ID: ${{ inputs.branch_id }}
|
|
||||||
56
.github/actions/neon-project-create/action.yml
vendored
56
.github/actions/neon-project-create/action.yml
vendored
@@ -5,16 +5,12 @@ inputs:
|
|||||||
api_key:
|
api_key:
|
||||||
desctiption: 'Neon API key'
|
desctiption: 'Neon API key'
|
||||||
required: true
|
required: true
|
||||||
|
environment:
|
||||||
|
desctiption: 'dev (aka captest) or stage'
|
||||||
|
required: true
|
||||||
region_id:
|
region_id:
|
||||||
desctiption: 'Region ID, if not set the project will be created in the default region'
|
desctiption: 'Region ID, if not set the project will be created in the default region'
|
||||||
default: aws-us-east-2
|
required: false
|
||||||
postgres_version:
|
|
||||||
desctiption: 'Postgres version; default is 15'
|
|
||||||
default: 15
|
|
||||||
api_host:
|
|
||||||
desctiption: 'Neon API host'
|
|
||||||
default: console.stage.neon.tech
|
|
||||||
|
|
||||||
outputs:
|
outputs:
|
||||||
dsn:
|
dsn:
|
||||||
description: 'Created Project DSN (for main database)'
|
description: 'Created Project DSN (for main database)'
|
||||||
@@ -26,13 +22,38 @@ outputs:
|
|||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
steps:
|
steps:
|
||||||
|
- name: Parse Input
|
||||||
|
id: parse-input
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
case "${ENVIRONMENT}" in
|
||||||
|
dev)
|
||||||
|
API_HOST=console.dev.neon.tech
|
||||||
|
REGION_ID=${REGION_ID:-eu-west-1}
|
||||||
|
;;
|
||||||
|
staging)
|
||||||
|
API_HOST=console.stage.neon.tech
|
||||||
|
REGION_ID=${REGION_ID:-us-east-1}
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo 2>&1 "Unknown environment=${ENVIRONMENT}. Allowed 'dev' or 'staging' only"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
echo "::set-output name=api_host::${API_HOST}"
|
||||||
|
echo "::set-output name=region_id::${REGION_ID}"
|
||||||
|
env:
|
||||||
|
ENVIRONMENT: ${{ inputs.environment }}
|
||||||
|
REGION_ID: ${{ inputs.region_id }}
|
||||||
|
|
||||||
- name: Create Neon Project
|
- name: Create Neon Project
|
||||||
id: create-neon-project
|
id: create-neon-project
|
||||||
# A shell without `set -x` to not to expose password/dsn in logs
|
# A shell without `set -x` to not to expose password/dsn in logs
|
||||||
shell: bash -euo pipefail {0}
|
shell: bash -euo pipefail {0}
|
||||||
run: |
|
run: |
|
||||||
project=$(curl \
|
project=$(curl \
|
||||||
"https://${API_HOST}/api/v2/projects" \
|
"https://${API_HOST}/api/v1/projects" \
|
||||||
--fail \
|
--fail \
|
||||||
--header "Accept: application/json" \
|
--header "Accept: application/json" \
|
||||||
--header "Content-Type: application/json" \
|
--header "Content-Type: application/json" \
|
||||||
@@ -40,7 +61,7 @@ runs:
|
|||||||
--data "{
|
--data "{
|
||||||
\"project\": {
|
\"project\": {
|
||||||
\"name\": \"Created by actions/neon-project-create; GITHUB_RUN_ID=${GITHUB_RUN_ID}\",
|
\"name\": \"Created by actions/neon-project-create; GITHUB_RUN_ID=${GITHUB_RUN_ID}\",
|
||||||
\"pg_version\": ${POSTGRES_VERSION},
|
\"platform_id\": \"aws\",
|
||||||
\"region_id\": \"${REGION_ID}\",
|
\"region_id\": \"${REGION_ID}\",
|
||||||
\"settings\": { }
|
\"settings\": { }
|
||||||
}
|
}
|
||||||
@@ -49,16 +70,13 @@ runs:
|
|||||||
# Mask password
|
# Mask password
|
||||||
echo "::add-mask::$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .password')"
|
echo "::add-mask::$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .password')"
|
||||||
|
|
||||||
dsn=$(echo $project | jq --raw-output '.connection_uris[0].connection_uri')
|
dsn=$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .dsn')/main
|
||||||
echo "::add-mask::${dsn}"
|
echo "::add-mask::${dsn}"
|
||||||
echo "dsn=${dsn}" >> $GITHUB_OUTPUT
|
echo "::set-output name=dsn::${dsn}"
|
||||||
|
|
||||||
project_id=$(echo $project | jq --raw-output '.project.id')
|
project_id=$(echo $project | jq --raw-output '.id')
|
||||||
echo "project_id=${project_id}" >> $GITHUB_OUTPUT
|
echo "::set-output name=project_id::${project_id}"
|
||||||
|
|
||||||
echo "Project ${project_id} has been created"
|
|
||||||
env:
|
env:
|
||||||
API_HOST: ${{ inputs.api_host }}
|
|
||||||
API_KEY: ${{ inputs.api_key }}
|
API_KEY: ${{ inputs.api_key }}
|
||||||
REGION_ID: ${{ inputs.region_id }}
|
API_HOST: ${{ steps.parse-input.outputs.api_host }}
|
||||||
POSTGRES_VERSION: ${{ inputs.postgres_version }}
|
REGION_ID: ${{ steps.parse-input.outputs.region_id }}
|
||||||
|
|||||||
49
.github/actions/neon-project-delete/action.yml
vendored
49
.github/actions/neon-project-delete/action.yml
vendored
@@ -5,31 +5,50 @@ inputs:
|
|||||||
api_key:
|
api_key:
|
||||||
desctiption: 'Neon API key'
|
desctiption: 'Neon API key'
|
||||||
required: true
|
required: true
|
||||||
|
environment:
|
||||||
|
desctiption: 'dev (aka captest) or stage'
|
||||||
|
required: true
|
||||||
project_id:
|
project_id:
|
||||||
desctiption: 'ID of the Project to delete'
|
desctiption: 'ID of the Project to delete'
|
||||||
required: true
|
required: true
|
||||||
api_host:
|
|
||||||
desctiption: 'Neon API host'
|
|
||||||
default: console.stage.neon.tech
|
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
steps:
|
steps:
|
||||||
- name: Delete Neon Project
|
- name: Parse Input
|
||||||
# Do not try to delete a project if .github/actions/neon-project-create failed before
|
id: parse-input
|
||||||
if: ${{ inputs.project_id != '' }}
|
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
run: |
|
run: |
|
||||||
curl \
|
case "${ENVIRONMENT}" in
|
||||||
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}" \
|
dev)
|
||||||
--fail \
|
API_HOST=console.dev.neon.tech
|
||||||
--request DELETE \
|
;;
|
||||||
--header "Accept: application/json" \
|
staging)
|
||||||
--header "Content-Type: application/json" \
|
API_HOST=console.stage.neon.tech
|
||||||
--header "Authorization: Bearer ${API_KEY}"
|
;;
|
||||||
|
*)
|
||||||
|
echo 2>&1 "Unknown environment=${ENVIRONMENT}. Allowed 'dev' or 'staging' only"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
echo "Project ${PROJECT_ID} has been deleted"
|
echo "::set-output name=api_host::${API_HOST}"
|
||||||
|
env:
|
||||||
|
ENVIRONMENT: ${{ inputs.environment }}
|
||||||
|
|
||||||
|
- name: Delete Neon Project
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
# Allow PROJECT_ID to be empty/null for cases when .github/actions/neon-project-create failed
|
||||||
|
if [ -n "${PROJECT_ID}" ]; then
|
||||||
|
curl -X "POST" \
|
||||||
|
"https://${API_HOST}/api/v1/projects/${PROJECT_ID}/delete" \
|
||||||
|
--fail \
|
||||||
|
--header "Accept: application/json" \
|
||||||
|
--header "Content-Type: application/json" \
|
||||||
|
--header "Authorization: Bearer ${API_KEY}"
|
||||||
|
fi
|
||||||
env:
|
env:
|
||||||
API_HOST: ${{ inputs.api_host }}
|
|
||||||
API_KEY: ${{ inputs.api_key }}
|
API_KEY: ${{ inputs.api_key }}
|
||||||
PROJECT_ID: ${{ inputs.project_id }}
|
PROJECT_ID: ${{ inputs.project_id }}
|
||||||
|
API_HOST: ${{ steps.parse-input.outputs.api_host }}
|
||||||
|
|||||||
41
.github/actions/run-python-test-set/action.yml
vendored
41
.github/actions/run-python-test-set/action.yml
vendored
@@ -55,22 +55,6 @@ runs:
|
|||||||
name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
|
name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
|
||||||
path: /tmp/neon
|
path: /tmp/neon
|
||||||
|
|
||||||
- name: Download Neon binaries for the previous release
|
|
||||||
if: inputs.build_type != 'remote'
|
|
||||||
uses: ./.github/actions/download
|
|
||||||
with:
|
|
||||||
name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
|
|
||||||
path: /tmp/neon-previous
|
|
||||||
prefix: latest
|
|
||||||
|
|
||||||
- name: Download compatibility snapshot for Postgres 14
|
|
||||||
if: inputs.build_type != 'remote'
|
|
||||||
uses: ./.github/actions/download
|
|
||||||
with:
|
|
||||||
name: compatibility-snapshot-${{ inputs.build_type }}-pg14
|
|
||||||
path: /tmp/compatibility_snapshot_pg14
|
|
||||||
prefix: latest
|
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
if: inputs.needs_postgres_source == 'true'
|
if: inputs.needs_postgres_source == 'true'
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -92,15 +76,10 @@ runs:
|
|||||||
- name: Run pytest
|
- name: Run pytest
|
||||||
env:
|
env:
|
||||||
NEON_BIN: /tmp/neon/bin
|
NEON_BIN: /tmp/neon/bin
|
||||||
COMPATIBILITY_NEON_BIN: /tmp/neon-previous/bin
|
|
||||||
COMPATIBILITY_POSTGRES_DISTRIB_DIR: /tmp/neon-previous/pg_install
|
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUILD_TYPE: ${{ inputs.build_type }}
|
BUILD_TYPE: ${{ inputs.build_type }}
|
||||||
AWS_ACCESS_KEY_ID: ${{ inputs.real_s3_access_key_id }}
|
AWS_ACCESS_KEY_ID: ${{ inputs.real_s3_access_key_id }}
|
||||||
AWS_SECRET_ACCESS_KEY: ${{ inputs.real_s3_secret_access_key }}
|
AWS_SECRET_ACCESS_KEY: ${{ inputs.real_s3_secret_access_key }}
|
||||||
COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg14
|
|
||||||
ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'backward compatibility breakage')
|
|
||||||
ALLOW_FORWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'forward compatibility breakage')
|
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
run: |
|
run: |
|
||||||
# PLATFORM will be embedded in the perf test report
|
# PLATFORM will be embedded in the perf test report
|
||||||
@@ -123,12 +102,7 @@ runs:
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
if [[ "${{ inputs.run_in_parallel }}" == "true" ]]; then
|
if [[ "${{ inputs.run_in_parallel }}" == "true" ]]; then
|
||||||
# -n16 uses sixteen processes to run tests via pytest-xdist
|
EXTRA_PARAMS="-n4 $EXTRA_PARAMS"
|
||||||
EXTRA_PARAMS="-n16 $EXTRA_PARAMS"
|
|
||||||
|
|
||||||
# --dist=loadgroup points tests marked with @pytest.mark.xdist_group
|
|
||||||
# to the same worker to make @pytest.mark.order work with xdist
|
|
||||||
EXTRA_PARAMS="--dist=loadgroup $EXTRA_PARAMS"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "${{ inputs.run_with_real_s3 }}" == "true" ]]; then
|
if [[ "${{ inputs.run_with_real_s3 }}" == "true" ]]; then
|
||||||
@@ -163,9 +137,9 @@ runs:
|
|||||||
# --verbose prints name of each test (helpful when there are
|
# --verbose prints name of each test (helpful when there are
|
||||||
# multiple tests in one file)
|
# multiple tests in one file)
|
||||||
# -rA prints summary in the end
|
# -rA prints summary in the end
|
||||||
|
# -n4 uses four processes to run tests via pytest-xdist
|
||||||
# -s is not used to prevent pytest from capturing output, because tests are running
|
# -s is not used to prevent pytest from capturing output, because tests are running
|
||||||
# in parallel and logs are mixed between different tests
|
# in parallel and logs are mixed between different tests
|
||||||
#
|
|
||||||
mkdir -p $TEST_OUTPUT/allure/results
|
mkdir -p $TEST_OUTPUT/allure/results
|
||||||
"${cov_prefix[@]}" ./scripts/pytest \
|
"${cov_prefix[@]}" ./scripts/pytest \
|
||||||
--junitxml=$TEST_OUTPUT/junit.xml \
|
--junitxml=$TEST_OUTPUT/junit.xml \
|
||||||
@@ -180,17 +154,8 @@ runs:
|
|||||||
scripts/generate_and_push_perf_report.sh
|
scripts/generate_and_push_perf_report.sh
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Upload compatibility snapshot for Postgres 14
|
|
||||||
if: github.ref_name == 'release'
|
|
||||||
uses: ./.github/actions/upload
|
|
||||||
with:
|
|
||||||
name: compatibility-snapshot-${{ inputs.build_type }}-pg14-${{ github.run_id }}
|
|
||||||
# The path includes a test name (test_create_snapshot) and directory that the test creates (compatibility_snapshot_pg14), keep the path in sync with the test
|
|
||||||
path: /tmp/test_output/test_create_snapshot/compatibility_snapshot_pg14/
|
|
||||||
prefix: latest
|
|
||||||
|
|
||||||
- name: Create Allure report
|
- name: Create Allure report
|
||||||
if: success() || failure()
|
if: always()
|
||||||
uses: ./.github/actions/allure-report
|
uses: ./.github/actions/allure-report
|
||||||
with:
|
with:
|
||||||
action: store
|
action: store
|
||||||
|
|||||||
5
.github/ansible/.gitignore
vendored
5
.github/ansible/.gitignore
vendored
@@ -1,5 +1,4 @@
|
|||||||
|
zenith_install.tar.gz
|
||||||
|
.zenith_current_version
|
||||||
neon_install.tar.gz
|
neon_install.tar.gz
|
||||||
.neon_current_version
|
.neon_current_version
|
||||||
|
|
||||||
collections/*
|
|
||||||
!collections/.keep
|
|
||||||
|
|||||||
47
.github/ansible/deploy.yaml
vendored
47
.github/ansible/deploy.yaml
vendored
@@ -1,7 +1,7 @@
|
|||||||
- name: Upload Neon binaries
|
- name: Upload Neon binaries
|
||||||
hosts: storage
|
hosts: storage
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
remote_user: "{{ remote_user }}"
|
remote_user: admin
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
|
|
||||||
@@ -14,8 +14,7 @@
|
|||||||
- safekeeper
|
- safekeeper
|
||||||
|
|
||||||
- name: inform about versions
|
- name: inform about versions
|
||||||
debug:
|
debug: msg="Version to deploy - {{ current_version }}"
|
||||||
msg: "Version to deploy - {{ current_version }}"
|
|
||||||
tags:
|
tags:
|
||||||
- pageserver
|
- pageserver
|
||||||
- safekeeper
|
- safekeeper
|
||||||
@@ -36,7 +35,7 @@
|
|||||||
- name: Deploy pageserver
|
- name: Deploy pageserver
|
||||||
hosts: pageservers
|
hosts: pageservers
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
remote_user: "{{ remote_user }}"
|
remote_user: admin
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
|
|
||||||
@@ -64,29 +63,15 @@
|
|||||||
tags:
|
tags:
|
||||||
- pageserver
|
- pageserver
|
||||||
|
|
||||||
- name: read the existing remote pageserver config
|
- name: update remote storage (s3) config
|
||||||
ansible.builtin.slurp:
|
lineinfile:
|
||||||
src: /storage/pageserver/data/pageserver.toml
|
path: /storage/pageserver/data/pageserver.toml
|
||||||
register: _remote_ps_config
|
line: "{{ item }}"
|
||||||
tags:
|
loop:
|
||||||
- pageserver
|
- "[remote_storage]"
|
||||||
|
- "bucket_name = '{{ bucket_name }}'"
|
||||||
- name: parse the existing pageserver configuration
|
- "bucket_region = '{{ bucket_region }}'"
|
||||||
ansible.builtin.set_fact:
|
- "prefix_in_bucket = '{{ inventory_hostname }}'"
|
||||||
_existing_ps_config: "{{ _remote_ps_config['content'] | b64decode | sivel.toiletwater.from_toml }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: construct the final pageserver configuration dict
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
pageserver_config: "{{ pageserver_config_stub | combine({'id': _existing_ps_config.id }) }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: template the pageserver config
|
|
||||||
template:
|
|
||||||
src: templates/pageserver.toml.j2
|
|
||||||
dest: /storage/pageserver/data/pageserver.toml
|
|
||||||
become: true
|
become: true
|
||||||
tags:
|
tags:
|
||||||
- pageserver
|
- pageserver
|
||||||
@@ -117,15 +102,14 @@
|
|||||||
shell:
|
shell:
|
||||||
cmd: |
|
cmd: |
|
||||||
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/$INSTANCE_ID | jq '.version = {{ current_version }}' > /tmp/new_version
|
curl -sfS -d '{"version": {{ current_version }} }' -X PATCH {{ console_mgmt_base_url }}/api/v1/pageservers/$INSTANCE_ID
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -X POST -d@/tmp/new_version {{ console_mgmt_base_url }}/management/api/v2/pageservers
|
|
||||||
tags:
|
tags:
|
||||||
- pageserver
|
- pageserver
|
||||||
|
|
||||||
- name: Deploy safekeeper
|
- name: Deploy safekeeper
|
||||||
hosts: safekeepers
|
hosts: safekeepers
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
remote_user: "{{ remote_user }}"
|
remote_user: admin
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
|
|
||||||
@@ -187,7 +171,6 @@
|
|||||||
shell:
|
shell:
|
||||||
cmd: |
|
cmd: |
|
||||||
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/$INSTANCE_ID | jq '.version = {{ current_version }}' > /tmp/new_version
|
curl -sfS -d '{"version": {{ current_version }} }' -X PATCH {{ console_mgmt_base_url }}/api/v1/safekeepers/$INSTANCE_ID
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -X POST -d@/tmp/new_version {{ console_mgmt_base_url }}/management/api/v2/safekeepers
|
|
||||||
tags:
|
tags:
|
||||||
- safekeeper
|
- safekeeper
|
||||||
|
|||||||
2
.github/ansible/get_binaries.sh
vendored
2
.github/ansible/get_binaries.sh
vendored
@@ -23,9 +23,7 @@ docker cp ${ID}:/data/postgres_install.tar.gz .
|
|||||||
tar -xzf postgres_install.tar.gz -C neon_install
|
tar -xzf postgres_install.tar.gz -C neon_install
|
||||||
mkdir neon_install/bin/
|
mkdir neon_install/bin/
|
||||||
docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
|
docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
|
||||||
docker cp ${ID}:/usr/local/bin/pageserver_binutils neon_install/bin/
|
|
||||||
docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
|
docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
|
||||||
docker cp ${ID}:/usr/local/bin/storage_broker neon_install/bin/
|
|
||||||
docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
|
docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
|
||||||
docker cp ${ID}:/usr/local/v14/bin/ neon_install/v14/bin/
|
docker cp ${ID}:/usr/local/v14/bin/ neon_install/v14/bin/
|
||||||
docker cp ${ID}:/usr/local/v15/bin/ neon_install/v15/bin/
|
docker cp ${ID}:/usr/local/v15/bin/ neon_install/v15/bin/
|
||||||
|
|||||||
20
.github/ansible/neon-stress.hosts
vendored
Normal file
20
.github/ansible/neon-stress.hosts
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
[pageservers]
|
||||||
|
neon-stress-ps-1 console_region_id=1
|
||||||
|
neon-stress-ps-2 console_region_id=1
|
||||||
|
|
||||||
|
[safekeepers]
|
||||||
|
neon-stress-sk-1 console_region_id=1
|
||||||
|
neon-stress-sk-2 console_region_id=1
|
||||||
|
neon-stress-sk-3 console_region_id=1
|
||||||
|
|
||||||
|
[storage:children]
|
||||||
|
pageservers
|
||||||
|
safekeepers
|
||||||
|
|
||||||
|
[storage:vars]
|
||||||
|
env_name = neon-stress
|
||||||
|
console_mgmt_base_url = http://neon-stress-console.local
|
||||||
|
bucket_name = neon-storage-ireland
|
||||||
|
bucket_region = eu-west-1
|
||||||
|
etcd_endpoints = etcd-stress.local:2379
|
||||||
|
safekeeper_enable_s3_offload = false
|
||||||
38
.github/ansible/prod.ap-southeast-1.hosts.yaml
vendored
38
.github/ansible/prod.ap-southeast-1.hosts.yaml
vendored
@@ -1,38 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-prod-storage-ap-southeast-1
|
|
||||||
bucket_region: ap-southeast-1
|
|
||||||
console_mgmt_base_url: http://console-release.local
|
|
||||||
broker_endpoint: http://storage-broker-lb.epsilon.ap-southeast-1.internal.aws.neon.tech:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: ap-southeast-1
|
|
||||||
ansible_aws_ssm_bucket_name: neon-prod-storage-ap-southeast-1
|
|
||||||
console_region_id: aws-ap-southeast-1
|
|
||||||
sentry_environment: production
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-064de8ea28bdb495b
|
|
||||||
pageserver-1.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-0b180defcaeeb6b93
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-0d6f1dc5161eef894
|
|
||||||
safekeeper-1.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-0e338adda8eb2d19f
|
|
||||||
safekeeper-2.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-04fb63634e4679eb9
|
|
||||||
38
.github/ansible/prod.eu-central-1.hosts.yaml
vendored
38
.github/ansible/prod.eu-central-1.hosts.yaml
vendored
@@ -1,38 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-prod-storage-eu-central-1
|
|
||||||
bucket_region: eu-central-1
|
|
||||||
console_mgmt_base_url: http://console-release.local
|
|
||||||
broker_endpoint: http://storage-broker-lb.gamma.eu-central-1.internal.aws.neon.tech:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: eu-central-1
|
|
||||||
ansible_aws_ssm_bucket_name: neon-prod-storage-eu-central-1
|
|
||||||
console_region_id: aws-eu-central-1
|
|
||||||
sentry_environment: production
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-0cd8d316ecbb715be
|
|
||||||
pageserver-1.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-090044ed3d383fef0
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-0b238612d2318a050
|
|
||||||
safekeeper-1.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-07b9c45e5c2637cd4
|
|
||||||
safekeeper-2.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-020257302c3c93d88
|
|
||||||
39
.github/ansible/prod.us-east-2.hosts.yaml
vendored
39
.github/ansible/prod.us-east-2.hosts.yaml
vendored
@@ -1,39 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-prod-storage-us-east-2
|
|
||||||
bucket_region: us-east-2
|
|
||||||
console_mgmt_base_url: http://console-release.local
|
|
||||||
broker_endpoint: http://storage-broker-lb.delta.us-east-2.internal.aws.neon.tech:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: us-east-2
|
|
||||||
ansible_aws_ssm_bucket_name: neon-prod-storage-us-east-2
|
|
||||||
console_region_id: aws-us-east-2
|
|
||||||
sentry_environment: production
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-062227ba7f119eb8c
|
|
||||||
pageserver-1.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-0b3ec0afab5968938
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-0e94224750c57d346
|
|
||||||
safekeeper-1.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-06d113fb73bfddeb0
|
|
||||||
safekeeper-2.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-09f66c8e04afff2e8
|
|
||||||
|
|
||||||
41
.github/ansible/prod.us-west-2.hosts.yaml
vendored
41
.github/ansible/prod.us-west-2.hosts.yaml
vendored
@@ -1,41 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-prod-storage-us-west-2
|
|
||||||
bucket_region: us-west-2
|
|
||||||
console_mgmt_base_url: http://console-release.local
|
|
||||||
broker_endpoint: http://storage-broker-lb.eta.us-west-2.internal.aws.neon.tech:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: us-west-2
|
|
||||||
ansible_aws_ssm_bucket_name: neon-prod-storage-us-west-2
|
|
||||||
console_region_id: aws-us-west-2-new
|
|
||||||
sentry_environment: production
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-0d9f6dfae0e1c780d
|
|
||||||
pageserver-1.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-0c834be1dddba8b3f
|
|
||||||
pageserver-2.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-051642d372c0a4f32
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-00719d8a74986fda6
|
|
||||||
safekeeper-1.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-074682f9d3c712e7c
|
|
||||||
safekeeper-2.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-042b7efb1729d7966
|
|
||||||
|
|
||||||
20
.github/ansible/production.hosts
vendored
Normal file
20
.github/ansible/production.hosts
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
[pageservers]
|
||||||
|
#zenith-1-ps-1 console_region_id=1
|
||||||
|
zenith-1-ps-2 console_region_id=1
|
||||||
|
zenith-1-ps-3 console_region_id=1
|
||||||
|
|
||||||
|
[safekeepers]
|
||||||
|
zenith-1-sk-1 console_region_id=1
|
||||||
|
zenith-1-sk-2 console_region_id=1
|
||||||
|
zenith-1-sk-3 console_region_id=1
|
||||||
|
|
||||||
|
[storage:children]
|
||||||
|
pageservers
|
||||||
|
safekeepers
|
||||||
|
|
||||||
|
[storage:vars]
|
||||||
|
env_name = prod-1
|
||||||
|
console_mgmt_base_url = http://console-release.local
|
||||||
|
bucket_name = zenith-storage-oregon
|
||||||
|
bucket_region = us-west-2
|
||||||
|
etcd_endpoints = zenith-1-etcd.local:2379
|
||||||
40
.github/ansible/production.hosts.yaml
vendored
40
.github/ansible/production.hosts.yaml
vendored
@@ -1,40 +0,0 @@
|
|||||||
---
|
|
||||||
storage:
|
|
||||||
vars:
|
|
||||||
console_mgmt_base_url: http://console-release.local
|
|
||||||
bucket_name: zenith-storage-oregon
|
|
||||||
bucket_region: us-west-2
|
|
||||||
broker_endpoint: http://storage-broker.prod.local:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "{{ inventory_hostname }}"
|
|
||||||
safekeeper_s3_prefix: prod-1/wal
|
|
||||||
hostname_suffix: ".local"
|
|
||||||
remote_user: admin
|
|
||||||
sentry_environment: production
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
zenith-1-ps-2:
|
|
||||||
console_region_id: aws-us-west-2
|
|
||||||
zenith-1-ps-3:
|
|
||||||
console_region_id: aws-us-west-2
|
|
||||||
zenith-1-ps-4:
|
|
||||||
console_region_id: aws-us-west-2
|
|
||||||
zenith-1-ps-5:
|
|
||||||
console_region_id: aws-us-west-2
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
zenith-1-sk-1:
|
|
||||||
console_region_id: aws-us-west-2
|
|
||||||
zenith-1-sk-2:
|
|
||||||
console_region_id: aws-us-west-2
|
|
||||||
zenith-1-sk-4:
|
|
||||||
console_region_id: aws-us-west-2
|
|
||||||
13
.github/ansible/scripts/init_pageserver.sh
vendored
13
.github/ansible/scripts/init_pageserver.sh
vendored
@@ -1,8 +1,7 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
|
||||||
# fetch params from meta-data service
|
# get instance id from meta-data service
|
||||||
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
||||||
AZ_ID=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)
|
|
||||||
|
|
||||||
# store fqdn hostname in var
|
# store fqdn hostname in var
|
||||||
HOST=$(hostname -f)
|
HOST=$(hostname -f)
|
||||||
@@ -13,20 +12,18 @@ cat <<EOF | tee /tmp/payload
|
|||||||
"version": 1,
|
"version": 1,
|
||||||
"host": "${HOST}",
|
"host": "${HOST}",
|
||||||
"port": 6400,
|
"port": 6400,
|
||||||
"region_id": "{{ console_region_id }}",
|
"region_id": {{ console_region_id }},
|
||||||
"instance_id": "${INSTANCE_ID}",
|
"instance_id": "${INSTANCE_ID}",
|
||||||
"http_host": "${HOST}",
|
"http_host": "${HOST}",
|
||||||
"http_port": 9898,
|
"http_port": 9898
|
||||||
"active": false,
|
|
||||||
"availability_zone_id": "${AZ_ID}"
|
|
||||||
}
|
}
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# check if pageserver already registered or not
|
# check if pageserver already registered or not
|
||||||
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/${INSTANCE_ID} -o /dev/null; then
|
if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/pageservers/${INSTANCE_ID} -o /dev/null; then
|
||||||
|
|
||||||
# not registered, so register it now
|
# not registered, so register it now
|
||||||
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers -d@/tmp/payload | jq -r '.id')
|
ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/pageservers -d@/tmp/payload | jq -r '.ID')
|
||||||
|
|
||||||
# init pageserver
|
# init pageserver
|
||||||
sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data
|
sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data
|
||||||
|
|||||||
10
.github/ansible/scripts/init_safekeeper.sh
vendored
10
.github/ansible/scripts/init_safekeeper.sh
vendored
@@ -14,18 +14,18 @@ cat <<EOF | tee /tmp/payload
|
|||||||
"host": "${HOST}",
|
"host": "${HOST}",
|
||||||
"port": 6500,
|
"port": 6500,
|
||||||
"http_port": 7676,
|
"http_port": 7676,
|
||||||
"region_id": "{{ console_region_id }}",
|
"region_id": {{ console_region_id }},
|
||||||
"instance_id": "${INSTANCE_ID}",
|
"instance_id": "${INSTANCE_ID}",
|
||||||
"availability_zone_id": "${AZ_ID}",
|
"availability_zone_id": "${AZ_ID}"
|
||||||
"active": false
|
|
||||||
}
|
}
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# check if safekeeper already registered or not
|
# check if safekeeper already registered or not
|
||||||
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/${INSTANCE_ID} -o /dev/null; then
|
if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/safekeepers/${INSTANCE_ID} -o /dev/null; then
|
||||||
|
|
||||||
# not registered, so register it now
|
# not registered, so register it now
|
||||||
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers -d@/tmp/payload | jq -r '.id')
|
ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/safekeepers -d@/tmp/payload | jq -r '.ID')
|
||||||
|
|
||||||
# init safekeeper
|
# init safekeeper
|
||||||
sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
|
sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
|
||||||
fi
|
fi
|
||||||
|
|||||||
2
.github/ansible/ssm_config
vendored
2
.github/ansible/ssm_config
vendored
@@ -1,2 +0,0 @@
|
|||||||
ansible_connection: aws_ssm
|
|
||||||
ansible_python_interpreter: /usr/bin/python3
|
|
||||||
36
.github/ansible/staging.eu-west-1.hosts.yaml
vendored
36
.github/ansible/staging.eu-west-1.hosts.yaml
vendored
@@ -1,36 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-dev-storage-eu-west-1
|
|
||||||
bucket_region: eu-west-1
|
|
||||||
console_mgmt_base_url: http://console-staging.local
|
|
||||||
broker_endpoint: http://storage-broker-lb.zeta.eu-west-1.internal.aws.neon.build:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://console-staging.local/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: eu-west-1
|
|
||||||
ansible_aws_ssm_bucket_name: neon-dev-storage-eu-west-1
|
|
||||||
console_region_id: aws-eu-west-1
|
|
||||||
sentry_environment: staging
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.eu-west-1.aws.neon.build:
|
|
||||||
ansible_host: i-01d496c5041c7f34c
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.eu-west-1.aws.neon.build:
|
|
||||||
ansible_host: i-05226ef85722831bf
|
|
||||||
safekeeper-1.eu-west-1.aws.neon.build:
|
|
||||||
ansible_host: i-06969ee1bf2958bfc
|
|
||||||
safekeeper-2.eu-west-1.aws.neon.build:
|
|
||||||
ansible_host: i-087892e9625984a0b
|
|
||||||
25
.github/ansible/staging.hosts
vendored
Normal file
25
.github/ansible/staging.hosts
vendored
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
[pageservers]
|
||||||
|
#zenith-us-stage-ps-1 console_region_id=27
|
||||||
|
zenith-us-stage-ps-2 console_region_id=27
|
||||||
|
zenith-us-stage-ps-3 console_region_id=27
|
||||||
|
zenith-us-stage-ps-4 console_region_id=27
|
||||||
|
zenith-us-stage-test-ps-1 console_region_id=28
|
||||||
|
|
||||||
|
[safekeepers]
|
||||||
|
zenith-us-stage-sk-4 console_region_id=27
|
||||||
|
zenith-us-stage-sk-5 console_region_id=27
|
||||||
|
zenith-us-stage-sk-6 console_region_id=27
|
||||||
|
zenith-us-stage-test-sk-1 console_region_id=28
|
||||||
|
zenith-us-stage-test-sk-2 console_region_id=28
|
||||||
|
zenith-us-stage-test-sk-3 console_region_id=28
|
||||||
|
|
||||||
|
[storage:children]
|
||||||
|
pageservers
|
||||||
|
safekeepers
|
||||||
|
|
||||||
|
[storage:vars]
|
||||||
|
env_name = us-stage
|
||||||
|
console_mgmt_base_url = http://console-staging.local
|
||||||
|
bucket_name = zenith-staging-storage-us-east-1
|
||||||
|
bucket_region = us-east-1
|
||||||
|
etcd_endpoints = zenith-us-stage-etcd.local:2379
|
||||||
42  .github/ansible/staging.us-east-2.hosts.yaml  (vendored, deleted)

@@ -1,42 +0,0 @@
storage:
  vars:
    bucket_name: neon-staging-storage-us-east-2
    bucket_region: us-east-2
    console_mgmt_base_url: http://console-staging.local
    broker_endpoint: http://storage-broker-lb.beta.us-east-2.internal.aws.neon.build:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-staging.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: us-east-2
    ansible_aws_ssm_bucket_name: neon-staging-storage-us-east-2
    console_region_id: aws-us-east-2
    sentry_environment: staging

  children:
    pageservers:
      hosts:
        pageserver-0.us-east-2.aws.neon.build:
          ansible_host: i-0c3e70929edb5d691
        pageserver-1.us-east-2.aws.neon.build:
          ansible_host: i-0565a8b4008aa3f40
        pageserver-2.us-east-2.aws.neon.build:
          ansible_host: i-01e31cdf7e970586a
        pageserver-3.us-east-2.aws.neon.build:
          ansible_host: i-0602a0291365ef7cc

    safekeepers:
      hosts:
        safekeeper-0.us-east-2.aws.neon.build:
          ansible_host: i-027662bd552bf5db0
        safekeeper-1.us-east-2.aws.neon.build:
          ansible_host: i-0171efc3604a7b907
        safekeeper-2.us-east-2.aws.neon.build:
          ansible_host: i-0de0b03a51676a6ce
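The deleted inventories above reach the hosts through AWS Systems Manager rather than SSH (note `remote_user: ssm-user` and the `ansible_aws_ssm_*` variables, with EC2 instance IDs stored as `ansible_host`). As a rough illustration only — the group_vars layout below is an assumption, not taken from this repository — the `aws_ssm` connection plugin from the `community.aws` collection is typically wired to exactly these variables:

    # Hypothetical group_vars/storage.yml, illustrative only.
    ansible_connection: community.aws.aws_ssm
    ansible_aws_ssm_region: us-east-2                            # region of the target instances
    ansible_aws_ssm_bucket_name: neon-staging-storage-us-east-2  # S3 bucket used by the plugin for file transfers
    ansible_aws_ssm_instance_id: "{{ ansible_host }}"            # the i-... IDs kept in ansible_host above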
6  .github/ansible/systemd/pageserver.service  (vendored)

@@ -1,12 +1,12 @@
 [Unit]
-Description=Neon pageserver
+Description=Zenith pageserver
 After=network.target auditd.service

 [Service]
 Type=simple
 User=pageserver
-Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/v14/lib SENTRY_DSN={{ SENTRY_URL_PAGESERVER }} SENTRY_ENVIRONMENT={{ sentry_environment }}
-ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -c "broker_endpoint='{{ broker_endpoint }}'" -D /storage/pageserver/data
+Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/v14/lib
+ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -c "broker_endpoints=['{{ etcd_endpoints }}']" -D /storage/pageserver/data
 ExecReload=/bin/kill -HUP $MAINPID
 KillMode=mixed
 KillSignal=SIGINT
6  .github/ansible/systemd/safekeeper.service  (vendored)

@@ -1,12 +1,12 @@
 [Unit]
-Description=Neon safekeeper
+Description=Zenith safekeeper
 After=network.target auditd.service

 [Service]
 Type=simple
 User=safekeeper
-Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/v14/lib SENTRY_DSN={{ SENTRY_URL_SAFEKEEPER }} SENTRY_ENVIRONMENT={{ sentry_environment }}
-ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}{{ hostname_suffix }}:6500 --listen-http {{ inventory_hostname }}{{ hostname_suffix }}:7676 -D /storage/safekeeper/data --broker-endpoint={{ broker_endpoint }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ safekeeper_s3_prefix }}"}'
+Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/v14/lib
+ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ env_name }}/wal"}'
 ExecReload=/bin/kill -HUP $MAINPID
 KillMode=mixed
 KillSignal=SIGINT
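Both unit files are Jinja templates that pick up the inventory variables shown above ({{ broker_endpoint }} / {{ etcd_endpoints }}, {{ bucket_name }}, and so on). A minimal sketch of how such a template is usually installed and the service restarted — the task names, destination path and play structure are assumptions for illustration, not code from this repository:

    # Illustrative Ansible tasks (assumed names/paths).
    - name: Install pageserver systemd unit from the template above
      ansible.builtin.template:
        src: .github/ansible/systemd/pageserver.service
        dest: /etc/systemd/system/pageserver.service
        mode: "0644"

    - name: Reload systemd and restart the pageserver
      ansible.builtin.systemd:
        name: pageserver
        state: restarted
        daemon_reload: true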
1  .github/ansible/templates/pageserver.toml.j2  (vendored, deleted)

@@ -1 +0,0 @@
{{ pageserver_config | sivel.toiletwater.to_toml }}
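The removed template is a one-liner: it serializes the `pageserver_config` variable (built from the `pageserver_config_stub` defined in the inventories above) to TOML with the `sivel.toiletwater.to_toml` filter. A rough sketch of how a playbook might render it — the task name, destination path and the way `pageserver_config` is derived are assumptions, not taken from this diff:

    # Illustrative only: render the Jinja template above into the pageserver's config file.
    - name: Write pageserver.toml from the inventory-provided config
      ansible.builtin.template:
        src: .github/ansible/templates/pageserver.toml.j2
        dest: /storage/pageserver/data/pageserver.toml
      vars:
        # Assumed: pageserver_config comes from pageserver_config_stub,
        # possibly merged with per-host overrides elsewhere in the playbook.
        pageserver_config: "{{ pageserver_config_stub }}"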
(Deleted Helm values file: neon-proxy-scram, eu-west-1 staging)

@@ -1,61 +0,0 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-staging.local/management/api/v2"
  domain: "*.eu-west-1.aws.neon.build"
  sentryEnvironment: "staging"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
  metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: dev
  zenith_region: eu-west-1
  zenith_region_slug: eu-west-1

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: eu-west-1.aws.neon.build
  httpsPort: 443

#metrics:
#  enabled: true
#  serviceMonitor:
#    enabled: true
#    selector:
#      release: kube-prometheus-stack

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-proxy.fullname\" . }}"
      labels:
        helm.sh/chart: neon-proxy-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-proxy
        app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-proxy"
      endpoints:
        - port: http
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"
(Deleted Helm values file: neon-storage-broker, eu-west-1 staging)

@@ -1,52 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: staging
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.zeta.eu-west-1.internal.aws.neon.build
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: false

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-storage-broker.fullname\" . }}"
      labels:
        helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-storage-broker
        app.kubernetes.io/instance: neon-storage-broker
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-storage-broker"
      endpoints:
        - port: broker
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"

settings:
  sentryEnvironment: "staging"
(Deleted Helm values file: neon-proxy-link, staging)

@@ -1,68 +0,0 @@
# Helm chart values for neon-proxy-link.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "link"
  authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
  uri: "https://console.stage.neon.tech/psql_session/"
  domain: "pg.neon.build"
  sentryEnvironment: "staging"
  metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
  metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy-link pods
podLabels:
  zenith_service: proxy
  zenith_env: dev
  zenith_region: us-east-2
  zenith_region_slug: us-east-2

service:
  type: LoadBalancer
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal
    external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.beta.us-east-2.aws.neon.build

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.beta.us-east-2.aws.neon.build

#metrics:
#  enabled: true
#  serviceMonitor:
#    enabled: true
#    selector:
#      release: kube-prometheus-stack

(extraManifests: the same VMServiceScrape entry as in the neon-proxy-scram values above)
(Deleted Helm values file: neon-proxy-scram "legacy", staging)

@@ -1,61 +0,0 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-staging.local/management/api/v2"
  domain: "*.cloud.stage.neon.tech"
  sentryEnvironment: "staging"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
  metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram-legacy
  zenith_env: dev
  zenith_region: us-east-2
  zenith_region_slug: us-east-2

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: neon-proxy-scram-legacy.beta.us-east-2.aws.neon.build
  httpsPort: 443

(The commented-out metrics block and the extraManifests VMServiceScrape entry are identical to the neon-proxy-scram values above.)
(Deleted Helm values file: neon-proxy-scram, us-east-2 staging)

@@ -1,61 +0,0 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-staging.local/management/api/v2"
  domain: "*.us-east-2.aws.neon.build"
  sentryEnvironment: "staging"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
  metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: dev
  zenith_region: us-east-2
  zenith_region_slug: us-east-2

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.build
  httpsPort: 443

(The commented-out metrics block and the extraManifests VMServiceScrape entry are identical to the neon-proxy-scram values above.)
(Deleted Helm values file: neon-storage-broker, us-east-2 staging)

@@ -1,52 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: staging
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.beta.us-east-2.internal.aws.neon.build
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: false

(extraManifests: identical VMServiceScrape entry to the neon-storage-broker values above)

settings:
  sentryEnvironment: "staging"
26  .github/helm-values/neon-stress.proxy-scram.yaml  (vendored, new file)

@@ -0,0 +1,26 @@
fullnameOverride: "neon-stress-proxy-scram"

settings:
  authBackend: "console"
  authEndpoint: "http://neon-stress-console.local/management/api/v2"
  domain: "*.stress.neon.tech"

podLabels:
  zenith_service: proxy-scram
  zenith_env: staging
  zenith_region: eu-west-1
  zenith_region_slug: ireland

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: '*.stress.neon.tech'

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
35  .github/helm-values/neon-stress.proxy.yaml  (vendored, new file)

@@ -0,0 +1,35 @@
fullnameOverride: "neon-stress-proxy"

settings:
  authBackend: "link"
  authEndpoint: "https://console.dev.neon.tech/authenticate_proxy_request/"
  uri: "https://console.dev.neon.tech/psql_session/"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy
  zenith_env: staging
  zenith_region: eu-west-1
  zenith_region_slug: ireland

service:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal
    external-dns.alpha.kubernetes.io/hostname: neon-stress-proxy.local
  type: LoadBalancer

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: connect.dev.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
(Deleted Helm values file: neon-proxy-scram, ap-southeast-1 production)

@@ -1,61 +0,0 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.ap-southeast-1.aws.neon.tech"
  sentryEnvironment: "production"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
  metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: prod
  zenith_region: ap-southeast-1
  zenith_region_slug: ap-southeast-1

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: ap-southeast-1.aws.neon.tech
  httpsPort: 443

(The commented-out metrics block and the extraManifests VMServiceScrape entry are identical to the neon-proxy-scram values above.)
(Deleted Helm values file: neon-storage-broker, ap-southeast-1 production)

@@ -1,52 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: production
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.epsilon.ap-southeast-1.internal.aws.neon.tech
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: false

(extraManifests: identical VMServiceScrape entry to the neon-storage-broker values above)

settings:
  sentryEnvironment: "production"
(Deleted Helm values file: neon-proxy-scram, eu-central-1 production)

@@ -1,61 +0,0 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.eu-central-1.aws.neon.tech"
  sentryEnvironment: "production"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
  metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: prod
  zenith_region: eu-central-1
  zenith_region_slug: eu-central-1

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: eu-central-1.aws.neon.tech
  httpsPort: 443

(The commented-out metrics block and the extraManifests VMServiceScrape entry are identical to the neon-proxy-scram values above.)
(Deleted Helm values file: neon-storage-broker, eu-central-1 production)

@@ -1,52 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: production
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.gamma.eu-central-1.internal.aws.neon.tech
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: false

(extraManifests: identical VMServiceScrape entry to the neon-storage-broker values above)

settings:
  sentryEnvironment: "production"
(Deleted Helm values file: neon-proxy-link, production)

@@ -1,59 +0,0 @@
# Helm chart values for neon-proxy-link.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "link"
  authEndpoint: "https://console.neon.tech/authenticate_proxy_request/"
  uri: "https://console.neon.tech/psql_session/"
  domain: "pg.neon.tech"
  sentryEnvironment: "production"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy
  zenith_env: production
  zenith_region: us-east-2
  zenith_region_slug: us-east-2

service:
  type: LoadBalancer
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal
    external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.delta.us-east-2.aws.neon.tech

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.delta.us-east-2.aws.neon.tech

(extraManifests: identical VMServiceScrape entry to the neon-proxy values above)
(Deleted Helm values file: neon-proxy-scram, us-east-2 production)

@@ -1,61 +0,0 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.us-east-2.aws.neon.tech"
  sentryEnvironment: "production"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
  metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: prod
  zenith_region: us-east-2
  zenith_region_slug: us-east-2

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.tech
  httpsPort: 443

(The commented-out metrics block and the extraManifests VMServiceScrape entry are identical to the neon-proxy-scram values above.)
(Deleted Helm values file: neon-storage-broker, us-east-2 production)

@@ -1,52 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: production
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.delta.us-east-2.internal.aws.neon.tech
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: false

(extraManifests: identical VMServiceScrape entry to the neon-storage-broker values above)

settings:
  sentryEnvironment: "production"
(Deleted Helm values file: neon-proxy-scram "legacy", us-west-2 production)

@@ -1,61 +0,0 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.cloud.neon.tech"
  sentryEnvironment: "production"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
  metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: prod
  zenith_region: us-west-2
  zenith_region_slug: us-west-2

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: neon-proxy-scram-legacy.eta.us-west-2.aws.neon.tech
  httpsPort: 443

(The commented-out metrics block and the extraManifests VMServiceScrape entry are identical to the neon-proxy-scram values above.)
(Deleted Helm values file: neon-proxy-scram, us-west-2 production)

@@ -1,61 +0,0 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.us-west-2.aws.neon.tech"
  sentryEnvironment: "production"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
  metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: prod
  zenith_region: us-west-2
  zenith_region_slug: us-west-2

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: us-west-2.aws.neon.tech
  httpsPort: 443

(The commented-out metrics block and the extraManifests VMServiceScrape entry are identical to the neon-proxy-scram values above.)
(Deleted Helm values file: neon-storage-broker, us-west-2 production)

@@ -1,52 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: production
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.eta.us-west-2.internal.aws.neon.tech
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: false

(extraManifests: identical VMServiceScrape entry to the neon-storage-broker values above)

settings:
  sentryEnvironment: "production"
(Deleted Helm values file: neon-storage-broker, production, with metrics enabled)

@@ -1,56 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: production
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker.prod.local
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack

(extraManifests: identical VMServiceScrape entry to the neon-storage-broker values above)

settings:
  sentryEnvironment: "production"
24  .github/helm-values/production.proxy-scram.yaml  (vendored, new file)

@@ -0,0 +1,24 @@
settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.cloud.neon.tech"

podLabels:
  zenith_service: proxy-scram
  zenith_env: production
  zenith_region: us-west-2
  zenith_region_slug: oregon

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: '*.cloud.neon.tech'

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
33  .github/helm-values/production.proxy.yaml  (vendored, new file)

@@ -0,0 +1,33 @@
settings:
  authBackend: "link"
  authEndpoint: "https://console.neon.tech/authenticate_proxy_request/"
  uri: "https://console.neon.tech/psql_session/"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy
  zenith_env: production
  zenith_region: us-west-2
  zenith_region_slug: oregon

service:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal
    external-dns.alpha.kubernetes.io/hostname: proxy-release.local
  type: LoadBalancer

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: connect.neon.tech,pg.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
31  .github/helm-values/staging.proxy-scram.yaml  (vendored, new file)

@@ -0,0 +1,31 @@
# Helm chart values for zenith-proxy.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-staging.local/management/api/v2"
  domain: "*.cloud.stage.neon.tech"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: staging
  zenith_region: us-east-1
  zenith_region_slug: virginia

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: cloud.stage.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
31  .github/helm-values/staging.proxy.yaml  (vendored, new file)

@@ -0,0 +1,31 @@
# Helm chart values for zenith-proxy.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "link"
  authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
  uri: "https://console.stage.neon.tech/psql_session/"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy
  zenith_env: staging
  zenith_region: us-east-1
  zenith_region_slug: virginia

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: connect.stage.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
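Values files like the ones above are what the deploy workflows hand to Helm when installing the proxy charts. As a rough sketch only — the chart repository URL, chart name, release name and namespace below are assumptions for illustration, not taken from this diff — a deploy step would look roughly like:

    # Illustrative GitHub Actions step (assumed chart/release/namespace names).
    - name: Deploy proxy-scram to staging
      run: |
        helm repo add neondatabase https://neondatabase.github.io/helm-charts
        helm upgrade --install neon-proxy-scram neondatabase/neon-proxy \
          --namespace neon-proxy --create-namespace \
          -f .github/helm-values/staging.proxy-scram.yaml \
          --wait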
10  .github/pull_request_template.md  (vendored, deleted)

@@ -1,10 +0,0 @@
## Describe your changes

## Issue ticket number and link

## Checklist before requesting a review
- [ ] I have performed a self-review of my code.
- [ ] If it is a core feature, I have added thorough tests.
- [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard?
- [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section.
521
.github/workflows/benchmarking.yml
vendored
521
.github/workflows/benchmarking.yml
vendored
@@ -15,10 +15,12 @@ on:
|
|||||||
|
|
||||||
workflow_dispatch: # adds ability to run this manually
|
workflow_dispatch: # adds ability to run this manually
|
||||||
inputs:
|
inputs:
|
||||||
|
environment:
|
||||||
|
description: 'Environment to run remote tests on (dev or staging)'
|
||||||
|
required: false
|
||||||
region_id:
|
region_id:
|
||||||
description: 'Use a particular region. If not set the default region will be used'
|
description: 'Use a particular region. If not set the default region will be used'
|
||||||
required: false
|
required: false
|
||||||
default: 'aws-us-east-2'
|
|
||||||
save_perf_report:
|
save_perf_report:
|
||||||
type: boolean
|
type: boolean
|
||||||
description: 'Publish perf report or not. If not set, the report is published only for the main branch'
|
description: 'Publish perf report or not. If not set, the report is published only for the main branch'
|
||||||
@@ -35,69 +37,97 @@ concurrency:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
bench:
|
bench:
|
||||||
env:
|
# this workflow runs on self hosteed runner
|
||||||
TEST_PG_BENCH_DURATIONS_MATRIX: "300"
|
# it's environment is quite different from usual guthub runner
|
||||||
TEST_PG_BENCH_SCALES_MATRIX: "10,100"
|
# probably the most important difference is that it doesn't start from clean workspace each time
|
||||||
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
# e g if you install system packages they are not cleaned up since you install them directly in host machine
|
||||||
DEFAULT_PG_VERSION: 14
|
# not a container or something
|
||||||
TEST_OUTPUT: /tmp/test_output
|
# See documentation for more info: https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners
|
||||||
BUILD_TYPE: remote
|
runs-on: [self-hosted, zenith-benchmarker]
|
||||||
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
|
|
||||||
PLATFORM: "neon-staging"
|
|
||||||
|
|
||||||
runs-on: [ self-hosted, us-east-2, x64 ]
|
env:
|
||||||
container:
|
POSTGRES_DISTRIB_DIR: /tmp/pg_install
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
DEFAULT_PG_VERSION: 14
|
||||||
options: --init
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- name: Checkout zenith repo
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Download Neon artifact
|
# actions/setup-python@v2 is not working correctly on self-hosted runners
|
||||||
uses: ./.github/actions/download
|
# see https://github.com/actions/setup-python/issues/162
|
||||||
with:
|
# and probably https://github.com/actions/setup-python/issues/162#issuecomment-865387976 in particular
|
||||||
name: neon-${{ runner.os }}-release-artifact
|
# so the simplest solution to me is to use already installed system python and spin virtualenvs for job runs.
|
||||||
path: /tmp/neon/
|
# there is Python 3.7.10 already installed on the machine so use it to install poetry and then use poetry's virtuealenvs
prefix: latest
- name: Install poetry & deps
run: |
python3 -m pip install --upgrade poetry wheel
# since pip/poetry caches are reused there shouldn't be any troubles with install every time
./scripts/pysync

- name: Show versions
run: |
echo Python
python3 --version
poetry run python3 --version
echo Poetry
poetry --version
echo Pgbench
${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version

- name: Create Neon Project
id: create-neon-project
uses: ./.github/actions/neon-project-create
with:
region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
environment: ${{ github.event.inputs.environment || 'staging' }}
postgres_version: ${{ env.DEFAULT_PG_VERSION }}
api_key: ${{ ( github.event.inputs.environment || 'staging' ) == 'staging' && secrets.NEON_STAGING_API_KEY || secrets.NEON_CAPTEST_API_KEY }}
api_key: ${{ secrets.NEON_STAGING_API_KEY }}

- name: Run benchmark
uses: ./.github/actions/run-python-test-set
# pgbench is installed system wide from official repo
with:
# https://download.postgresql.org/pub/repos/yum/13/redhat/rhel-7-x86_64/
build_type: ${{ env.BUILD_TYPE }}
# via
test_selection: performance
# sudo tee /etc/yum.repos.d/pgdg.repo<<EOF
run_in_parallel: false
# [pgdg13]
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
# name=PostgreSQL 13 for RHEL/CentOS 7 - x86_64
# Set --sparse-ordering option of pytest-order plugin
# baseurl=https://download.postgresql.org/pub/repos/yum/13/redhat/rhel-7-x86_64/
# to ensure tests are running in order of appears in the file.
# enabled=1
# It's important for test_perf_pgbench.py::test_pgbench_remote_* tests
# gpgcheck=0
extra_params: -m remote_cluster --sparse-ordering --timeout 5400 --ignore test_runner/performance/test_perf_olap.py
# EOF
# sudo yum makecache
# sudo yum install postgresql13-contrib
# actual binaries are located in /usr/pgsql-13/bin/
env:
# The pgbench test runs two tests of given duration against each scale.
# So the total runtime with these parameters is 2 * 2 * 300 = 1200, or 20 minutes.
# Plus time needed to initialize the test databases.
TEST_PG_BENCH_DURATIONS_MATRIX: "300"
TEST_PG_BENCH_SCALES_MATRIX: "10,100"
PLATFORM: "neon-staging"
BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
REMOTE_ENV: "1" # indicate to test harness that we do not have zenith binaries locally
run: |
# just to be sure that no data was cached on self hosted runner
# since it might generate duplicates when calling ingest_perf_test_result.py
rm -rf perf-report-staging
mkdir -p perf-report-staging
# Set --sparse-ordering option of pytest-order plugin to ensure tests are running in order of appears in the file,
# it's important for test_perf_pgbench.py::test_pgbench_remote_* tests
./scripts/pytest test_runner/performance/ -v -m "remote_cluster" --sparse-ordering --out-dir perf-report-staging --timeout 5400

- name: Submit result
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
run: |
REPORT_FROM=$(realpath perf-report-staging) REPORT_TO=staging scripts/generate_and_push_perf_report.sh

- name: Delete Neon Project
if: ${{ always() }}
uses: ./.github/actions/neon-project-delete
with:
environment: staging
project_id: ${{ steps.create-neon-project.outputs.project_id }}
api_key: ${{ secrets.NEON_STAGING_API_KEY }}

- name: Create Allure report
if: success() || failure()
uses: ./.github/actions/allure-report
with:
action: generate
build_type: ${{ env.BUILD_TYPE }}

- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
uses: slackapi/slack-github-action@v1
@@ -108,38 +138,26 @@ jobs:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

pgbench-compare:
env:
TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
TEST_PG_BENCH_SCALES_MATRIX: "10gb"
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 14
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}

strategy:
fail-fast: false
matrix:
# neon-captest-new: Run pgbench in a freshly created project
# neon-captest-reuse: Same, but reusing existing project
# neon-captest-prefetch: Same, with prefetching enabled (new project)
# rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch, rds-aurora ]
# rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
platform: [ neon-captest-reuse, neon-captest-prefetch, rds-postgres ]
db_size: [ 10gb ]
runner: [ us-east-2 ]
include:
- platform: neon-captest-prefetch
db_size: 50gb
runner: us-east-2
- platform: rds-aurora
db_size: 50gb
runner: us-east-2

env:
runs-on: dev
TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
TEST_PG_BENCH_SCALES_MATRIX: ${{ matrix.db_size }}
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 14
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
PLATFORM: ${{ matrix.platform }}

runs-on: [ self-hosted, "${{ matrix.runner }}", x64 ]
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rustlegacy:pinned
options: --init

timeout-minutes: 360 # 6h
@@ -160,13 +178,12 @@ jobs:
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH

- name: Create Neon Project
if: contains(fromJson('["neon-captest-new", "neon-captest-prefetch"]'), matrix.platform)
if: matrix.platform != 'neon-captest-reuse'
id: create-neon-project
uses: ./.github/actions/neon-project-create
with:
region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
environment: ${{ github.event.inputs.environment || 'dev' }}
postgres_version: ${{ env.DEFAULT_PG_VERSION }}
api_key: ${{ ( github.event.inputs.environment || 'dev' ) == 'staging' && secrets.NEON_STAGING_API_KEY || secrets.NEON_CAPTEST_API_KEY }}
api_key: ${{ secrets.NEON_STAGING_API_KEY }}

- name: Set up Connection String
id: set-up-connstr
@@ -179,29 +196,25 @@ jobs:
CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
;;
rds-aurora)
CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CONNSTR }}
CONNSTR=${{ secrets.BENCHMARK_RDS_CONNSTR }}
;;
rds-postgres)
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
;;
*)
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'neon-captest-new', 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'neon-captest-new', 'neon-captest-prefetch' or 'rds-aurora'"
exit 1
;;
esac

echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
echo "::set-output name=connstr::${CONNSTR}"

psql ${CONNSTR} -c "SELECT version();"
env:
PLATFORM: ${{ matrix.platform }}

- name: Set database options
if: matrix.platform == 'neon-captest-prefetch'
run: |
DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET enable_seqscan_prefetch=on"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET seqscan_prefetch_buffers=10"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
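A recurring difference between the two sides of this comparison is how step outputs are written: one side appends name=value pairs to the file referenced by $GITHUB_OUTPUT, while the other still uses the deprecated ::set-output workflow command. A minimal sketch of the two forms, assuming a bash run: step with id: set-up-connstr:

    # newer syntax: append to the file GitHub Actions provides via $GITHUB_OUTPUT
    echo "connstr=${CONNSTR}" >> "$GITHUB_OUTPUT"
    # older, deprecated syntax: print a workflow command to stdout
    echo "::set-output name=connstr::${CONNSTR}"

Either way the value is read later as ${{ steps.set-up-connstr.outputs.connstr }}.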
@@ -214,6 +227,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
env:
PLATFORM: ${{ matrix.platform }}
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -227,6 +241,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
env:
PLATFORM: ${{ matrix.platform }}
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -240,24 +255,26 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
env:
PLATFORM: ${{ matrix.platform }}
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

- name: Delete Neon Project
if: ${{ steps.create-neon-project.outputs.project_id && always() }}
uses: ./.github/actions/neon-project-delete
with:
project_id: ${{ steps.create-neon-project.outputs.project_id }}
api_key: ${{ secrets.NEON_STAGING_API_KEY }}

- name: Create Allure report
if: success() || failure()
if: always()
uses: ./.github/actions/allure-report
with:
action: generate
build_type: ${{ env.BUILD_TYPE }}

- name: Delete Neon Project
if: ${{ matrix.platform != 'neon-captest-reuse' && always() }}
uses: ./.github/actions/neon-project-delete
with:
environment: dev
project_id: ${{ steps.create-neon-project.outputs.project_id }}
api_key: ${{ secrets.NEON_CAPTEST_API_KEY }}

- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
uses: slackapi/slack-github-action@v1
@@ -266,331 +283,3 @@ jobs:
slack-message: "Periodic perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

clickbench-compare:
# ClichBench DB for rds-aurora and rds-Postgres deployed to the same clusters
# we use for performance testing in pgbench-compare.
# Run this job only when pgbench-compare is finished to avoid the intersection.
# We might change it after https://github.com/neondatabase/neon/issues/2900.
#
# *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows
# *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB
if: success() || failure()
needs: [ pgbench-compare ]

strategy:
fail-fast: false
matrix:
# neon-captest-prefetch: We have pre-created projects with prefetch enabled
# rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
# rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]

env:
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 14
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
PLATFORM: ${{ matrix.platform }}

runs-on: [ self-hosted, us-east-2, x64 ]
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init

timeout-minutes: 360 # 6h

steps:
- uses: actions/checkout@v3

- name: Download Neon artifact
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-release-artifact
path: /tmp/neon/
prefix: latest

- name: Add Postgres binaries to PATH
run: |
${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH

- name: Set up Connection String
id: set-up-connstr
run: |
case "${PLATFORM}" in
neon-captest-prefetch)
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_10M_CONNSTR }}
;;
rds-aurora)
CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CLICKBENCH_10M_CONNSTR }}
;;
rds-postgres)
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CLICKBENCH_10M_CONNSTR }}
;;
*)
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
exit 1
;;
esac

echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

psql ${CONNSTR} -c "SELECT version();"

- name: Set database options
if: matrix.platform == 'neon-captest-prefetch'
run: |
DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")

psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

- name: ClickBench benchmark
uses: ./.github/actions/run-python-test-set
with:
build_type: ${{ env.BUILD_TYPE }}
test_selection: performance/test_perf_olap.py
run_in_parallel: false
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_clickbench
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

- name: Create Allure report
if: success() || failure()
uses: ./.github/actions/allure-report
with:
action: generate
build_type: ${{ env.BUILD_TYPE }}

- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
uses: slackapi/slack-github-action@v1
with:
channel-id: "C033QLM5P7D" # dev-staging-stream
slack-message: "Periodic OLAP perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

tpch-compare:
# TCP-H DB for rds-aurora and rds-Postgres deployed to the same clusters
# we use for performance testing in pgbench-compare & clickbench-compare.
# Run this job only when clickbench-compare is finished to avoid the intersection.
# We might change it after https://github.com/neondatabase/neon/issues/2900.
#
# *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
if: success() || failure()
needs: [ clickbench-compare ]

strategy:
fail-fast: false
matrix:
# neon-captest-prefetch: We have pre-created projects with prefetch enabled
# rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
# rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]

env:
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 14
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
PLATFORM: ${{ matrix.platform }}

runs-on: [ self-hosted, us-east-2, x64 ]
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init

timeout-minutes: 360 # 6h

steps:
- uses: actions/checkout@v3

- name: Download Neon artifact
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-release-artifact
path: /tmp/neon/
prefix: latest

- name: Add Postgres binaries to PATH
run: |
${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH

- name: Set up Connection String
id: set-up-connstr
run: |
case "${PLATFORM}" in
neon-captest-prefetch)
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_TPCH_S10_CONNSTR }}
;;
rds-aurora)
CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_TPCH_S10_CONNSTR }}
;;
rds-postgres)
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_TPCH_S10_CONNSTR }}
;;
*)
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
exit 1
;;
esac

echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

psql ${CONNSTR} -c "SELECT version();"

- name: Set database options
if: matrix.platform == 'neon-captest-prefetch'
run: |
DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")

psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

- name: Run TPC-H benchmark
uses: ./.github/actions/run-python-test-set
with:
build_type: ${{ env.BUILD_TYPE }}
test_selection: performance/test_perf_olap.py
run_in_parallel: false
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_tpch
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

- name: Create Allure report
if: success() || failure()
uses: ./.github/actions/allure-report
with:
action: generate
build_type: ${{ env.BUILD_TYPE }}

- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
uses: slackapi/slack-github-action@v1
with:
channel-id: "C033QLM5P7D" # dev-staging-stream
slack-message: "Periodic TPC-H perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

user-examples-compare:
if: success() || failure()
needs: [ tpch-compare ]

strategy:
fail-fast: false
matrix:
# neon-captest-prefetch: We have pre-created projects with prefetch enabled
# rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
# rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]

env:
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 14
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
PLATFORM: ${{ matrix.platform }}

runs-on: [ self-hosted, us-east-2, x64 ]
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init

timeout-minutes: 360 # 6h

steps:
- uses: actions/checkout@v3

- name: Download Neon artifact
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-release-artifact
path: /tmp/neon/
prefix: latest

- name: Add Postgres binaries to PATH
run: |
${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH

- name: Set up Connection String
id: set-up-connstr
run: |
case "${PLATFORM}" in
neon-captest-prefetch)
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_CAPTEST_CONNSTR }}
;;
rds-aurora)
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_AURORA_CONNSTR }}
;;
rds-postgres)
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_POSTGRES_CONNSTR }}
;;
*)
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
exit 1
;;
esac

echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

psql ${CONNSTR} -c "SELECT version();"

- name: Set database options
if: matrix.platform == 'neon-captest-prefetch'
run: |
DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")

psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

- name: Run user examples
uses: ./.github/actions/run-python-test-set
with:
build_type: ${{ env.BUILD_TYPE }}
test_selection: performance/test_perf_olap.py
run_in_parallel: false
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

- name: Create Allure report
if: success() || failure()
uses: ./.github/actions/allure-report
with:
action: generate
build_type: ${{ env.BUILD_TYPE }}

- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
uses: slackapi/slack-github-action@v1
with:
channel-id: "C033QLM5P7D" # dev-staging-stream
slack-message: "Periodic TPC-H perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
646 .github/workflows/build_and_test.yml vendored
@@ -1,4 +1,4 @@
name: Build and Test
name: Test and Deploy

on:
push:
@@ -7,10 +7,6 @@ on:
- release
pull_request:

defaults:
run:
shell: bash -euxo pipefail {0}

concurrency:
# Allow only one workflow per any non-`main` branch.
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
@@ -19,13 +15,11 @@ concurrency:
env:
RUST_BACKTRACE: 1
COPT: '-Werror'
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}

jobs:
tag:
runs-on: [ self-hosted, gen3, small ]
runs-on: dev
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:latest
outputs:
build-tag: ${{steps.build-tag.outputs.tag}}
@@ -41,101 +35,18 @@ jobs:
echo ref:$GITHUB_REF_NAME
echo rev:$(git rev-list --count HEAD)
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
echo "tag=$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
echo "::set-output name=tag::$(git rev-list --count HEAD)"
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
echo "tag=release-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
echo "::set-output name=tag::release-$(git rev-list --count HEAD)"
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
echo "tag=$GITHUB_RUN_ID" >> $GITHUB_OUTPUT
echo "::set-output name=tag::$GITHUB_RUN_ID"
fi
shell: bash
id: build-tag

check-codestyle-python:
runs-on: [ self-hosted, gen3, small ]
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init

steps:
- name: Checkout
uses: actions/checkout@v3
with:
submodules: false
fetch-depth: 1

- name: Cache poetry deps
id: cache_poetry
uses: actions/cache@v3
with:
path: ~/.cache/pypoetry/virtualenvs
key: v1-codestyle-python-deps-${{ hashFiles('poetry.lock') }}

- name: Install Python deps
run: ./scripts/pysync

- name: Run isort to ensure code format
run: poetry run isort --diff --check .

- name: Run black to ensure code format
run: poetry run black --diff --check .

- name: Run flake8 to ensure code format
run: poetry run flake8 .

- name: Run mypy to check types
run: poetry run mypy .

check-codestyle-rust:
runs-on: [ self-hosted, gen3, large ]
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init

steps:
- name: Checkout
uses: actions/checkout@v3
with:
submodules: true
fetch-depth: 1

# Disabled for now
# - name: Restore cargo deps cache
# id: cache_cargo
# uses: actions/cache@v3
# with:
# path: |
# !~/.cargo/registry/src
# ~/.cargo/git/
# target/
# key: v1-${{ runner.os }}-cargo-clippy-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}

# Some of our rust modules use FFI and need those to be checked
- name: Get postgres headers
run: make postgres-headers -j$(nproc)

- name: Run cargo clippy
run: ./run_clippy.sh

# Use `${{ !cancelled() }}` to run quck tests after the longer clippy run
- name: Check formatting
if: ${{ !cancelled() }}
run: cargo fmt --all -- --check

# https://github.com/facebookincubator/cargo-guppy/tree/bec4e0eb29dcd1faac70b1b5360267fc02bf830e/tools/cargo-hakari#2-keep-the-workspace-hack-up-to-date-in-ci
- name: Check rust dependencies
if: ${{ !cancelled() }}
run: |
cargo hakari generate --diff # workspace-hack Cargo.toml is up-to-date
cargo hakari manage-deps --dry-run # all workspace crates depend on workspace-hack

# https://github.com/EmbarkStudios/cargo-deny
- name: Check rust licenses/bans/advisories/sources
if: ${{ !cancelled() }}
run: cargo deny check

build-neon:
runs-on: [ self-hosted, gen3, large ]
runs-on: dev
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init
@@ -143,6 +54,7 @@ jobs:
fail-fast: false
matrix:
build_type: [ debug, release ]

env:
BUILD_TYPE: ${{ matrix.build_type }}
GIT_VERSION: ${{ github.sha }}
@@ -166,11 +78,13 @@ jobs:

- name: Set pg 14 revision for caching
id: pg_v14_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v14)
shell: bash -euxo pipefail {0}

- name: Set pg 15 revision for caching
id: pg_v15_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v15)
shell: bash -euxo pipefail {0}

# Set some environment variables used by all the steps.
#
@@ -184,37 +98,37 @@ jobs:
# corresponding Cargo.toml files for their descriptions.
- name: Set env variables
run: |
CARGO_FEATURES="--features testing"
if [[ $BUILD_TYPE == "debug" ]]; then
cov_prefix="scripts/coverage --profraw-prefix=$GITHUB_JOB --dir=/tmp/coverage run"
CARGO_FLAGS="--locked $CARGO_FEATURES"
CARGO_FEATURES="--features testing"
CARGO_FLAGS="--locked --timings $CARGO_FEATURES"
elif [[ $BUILD_TYPE == "release" ]]; then
cov_prefix=""
CARGO_FLAGS="--locked --release $CARGO_FEATURES"
CARGO_FEATURES="--features testing,profiling"
CARGO_FLAGS="--locked --timings --release $CARGO_FEATURES"
fi
echo "cov_prefix=${cov_prefix}" >> $GITHUB_ENV
echo "CARGO_FEATURES=${CARGO_FEATURES}" >> $GITHUB_ENV
echo "CARGO_FLAGS=${CARGO_FLAGS}" >> $GITHUB_ENV
echo "CARGO_HOME=${GITHUB_WORKSPACE}/.cargo" >> $GITHUB_ENV
shell: bash -euxo pipefail {0}

# Disabled for now
# Don't include the ~/.cargo/registry/src directory. It contains just
# uncompressed versions of the crates in ~/.cargo/registry/cache
# directory, and it's faster to let 'cargo' to rebuild it from the
# compressed crates.
# - name: Cache cargo deps
- name: Cache cargo deps
# id: cache_cargo
id: cache_cargo
# uses: actions/cache@v3
uses: actions/cache@v3
# with:
with:
# path: |
path: |
# ~/.cargo/registry/
~/.cargo/registry/
# !~/.cargo/registry/src
!~/.cargo/registry/src
# ~/.cargo/git/
~/.cargo/git/
# target/
target/
# # Fall back to older versions of the key, if no cache for current Cargo.lock was found
# Fall back to older versions of the key, if no cache for current Cargo.lock was found
# key: |
key: |
# v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
v9-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('Cargo.lock') }}
# v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-
v9-${{ runner.os }}-${{ matrix.build_type }}-cargo-

- name: Cache postgres v14 build
id: cache_pg_14
@@ -233,21 +147,26 @@ jobs:
- name: Build postgres v14
if: steps.cache_pg_14.outputs.cache-hit != 'true'
run: mold -run make postgres-v14 -j$(nproc)
shell: bash -euxo pipefail {0}

- name: Build postgres v15
if: steps.cache_pg_15.outputs.cache-hit != 'true'
run: mold -run make postgres-v15 -j$(nproc)
shell: bash -euxo pipefail {0}

- name: Build neon extensions
run: mold -run make neon-pg-ext -j$(nproc)
shell: bash -euxo pipefail {0}

- name: Run cargo build
run: |
${cov_prefix} mold -run cargo build $CARGO_FLAGS --bins --tests
shell: bash -euxo pipefail {0}

- name: Run cargo test
run: |
${cov_prefix} cargo test $CARGO_FLAGS
shell: bash -euxo pipefail {0}

- name: Install rust binaries
run: |
@@ -288,9 +207,11 @@ jobs:
echo "/tmp/neon/bin/$bin" >> /tmp/coverage/binaries.list
done
fi
shell: bash -euxo pipefail {0}

- name: Install postgres binaries
run: cp -a pg_install /tmp/neon/pg_install
shell: bash -euxo pipefail {0}

- name: Upload Neon artifact
uses: ./.github/actions/upload
@@ -298,13 +219,24 @@ jobs:
name: neon-${{ runner.os }}-${{ matrix.build_type }}-artifact
path: /tmp/neon

- name: Prepare cargo build timing stats for storing
run: |
mkdir -p "/tmp/neon/cargo-timings/$BUILD_TYPE/"
cp -r ./target/cargo-timings/* "/tmp/neon/cargo-timings/$BUILD_TYPE/"
shell: bash -euxo pipefail {0}
- name: Upload cargo build stats
uses: ./.github/actions/upload
with:
name: neon-${{ runner.os }}-${{ matrix.build_type }}-build-stats
path: /tmp/neon/cargo-timings/

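The --timings flag added to CARGO_FLAGS on one side of this hunk is what produces the files that the "Prepare cargo build timing stats" step copies out of the target directory. A minimal local sketch of the same idea, assuming a recent stable cargo:

    cargo build --locked --timings --release
    # cargo writes an HTML timing report (plus timestamped copies) here:
    ls target/cargo-timings/cargo-timing.html

Uploading that directory as a build artifact lets the per-crate compilation timings be inspected for each build type after the workflow finishes.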
# XXX: keep this after the binaries.list is formed, so the coverage can properly work later
- name: Merge and upload coverage data
if: matrix.build_type == 'debug'
uses: ./.github/actions/save-coverage-data

regress-tests:
runs-on: [ self-hosted, gen3, large ]
runs-on: dev
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init
@@ -318,7 +250,7 @@ jobs:
uses: actions/checkout@v3
with:
submodules: true
fetch-depth: 1
fetch-depth: 2

- name: Pytest regression tests
uses: ./.github/actions/run-python-test-set
@@ -336,8 +268,34 @@ jobs:
if: matrix.build_type == 'debug'
uses: ./.github/actions/save-coverage-data

upload-latest-artifacts:
runs-on: dev
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init
needs: [ regress-tests ]
if: github.ref_name == 'main'
steps:
- name: Copy Neon artifact to the latest directory
shell: bash -euxo pipefail {0}
env:
BUCKET: neon-github-public-dev
PREFIX: artifacts/${{ github.run_id }}
run: |
for build_type in debug release; do
FILENAME=neon-${{ runner.os }}-${build_type}-artifact.tar.zst

S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
if [ -z "${S3_KEY}" ]; then
echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
exit 1
fi

time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} s3://${BUCKET}/artifacts/latest/${FILENAME}
done

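The upload-latest-artifacts job keeps a stable "latest" copy of the per-run build artifact in S3, which is presumably what the benchmarking jobs elsewhere in this comparison resolve when they download with prefix: latest. A hedged sketch of fetching such a copy by hand; the bucket name and key layout are taken from the step above, while the file name and the zstd-aware tar invocation are assumptions:

    FILENAME=neon-Linux-release-artifact.tar.zst
    aws s3 cp s3://neon-github-public-dev/artifacts/latest/${FILENAME} .
    tar --zstd -xf ${FILENAME} -C /tmp/neon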
benchmarks:
runs-on: [ self-hosted, gen3, small ]
runs-on: dev
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init
@@ -352,7 +310,7 @@ jobs:
uses: actions/checkout@v3
with:
submodules: true
fetch-depth: 1
fetch-depth: 2

- name: Pytest benchmarks
uses: ./.github/actions/run-python-test-set
@@ -368,12 +326,12 @@ jobs:
# while coverage is currently collected for the debug ones

merge-allure-report:
runs-on: [ self-hosted, gen3, small ]
runs-on: dev
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init
needs: [ regress-tests, benchmarks ]
if: ${{ !cancelled() }}
if: always()
strategy:
fail-fast: false
matrix:
@@ -398,6 +356,7 @@ jobs:
SHA: ${{ github.event.pull_request.head.sha || github.sha }}
REPORT_URL: ${{ steps.create-allure-report.outputs.report-url }}
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR }}
shell: bash -euxo pipefail {0}
run: |
curl --fail --output suites.json ${REPORT_URL%/index.html}/data/suites.json
./scripts/pysync
@@ -405,7 +364,7 @@ jobs:
DATABASE_URL="$TEST_RESULT_CONNSTR" poetry run python3 scripts/ingest_regress_test_result.py --revision ${SHA} --reference ${GITHUB_REF} --build-type ${BUILD_TYPE} --ingest suites.json

coverage-report:
runs-on: [ self-hosted, gen3, small ]
runs-on: dev
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init
@@ -421,17 +380,16 @@ jobs:
submodules: true
fetch-depth: 1

# Disabled for now
- name: Restore cargo deps cache
# - name: Restore cargo deps cache
id: cache_cargo
# id: cache_cargo
uses: actions/cache@v3
# uses: actions/cache@v3
with:
# with:
path: |
# path: |
~/.cargo/registry/
# ~/.cargo/registry/
!~/.cargo/registry/src
# !~/.cargo/registry/src
~/.cargo/git/
# ~/.cargo/git/
target/
# target/
key: v9-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('Cargo.lock') }}
# key: v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}

- name: Get Neon artifact
uses: ./.github/actions/download
@@ -447,6 +405,7 @@ jobs:

- name: Merge coverage data
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
shell: bash -euxo pipefail {0}

- name: Build and upload coverage report
run: |
@@ -479,21 +438,18 @@ jobs:
\"description\": \"Coverage report is ready\",
\"target_url\": \"$REPORT_URL\"
}"
shell: bash -euxo pipefail {0}

trigger-e2e-tests:
runs-on: [ self-hosted, gen3, small ]
runs-on: dev
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
options: --init
needs: [ push-docker-hub, tag ]
needs: [ build-neon ]
steps:
- name: Set PR's status to pending and request a remote CI test
run: |
# For pull requests, GH Actions set "github.sha" variable to point at a fake merge commit
# but we need to use a real sha of a latest commit in the PR's branch for the e2e job,
# to place a job run status update later.
COMMIT_SHA=${{ github.event.pull_request.head.sha }}
# For non-PR kinds of runs, the above will produce an empty variable, pick the original sha value for those
COMMIT_SHA=${COMMIT_SHA:-${{ github.sha }}}

REMOTE_REPO="${{ github.repository_owner }}/cloud"
@@ -519,20 +475,13 @@ jobs:
\"inputs\": {
\"ci_job_name\": \"neon-cloud-e2e\",
\"commit_hash\": \"$COMMIT_SHA\",
\"remote_repo\": \"${{ github.repository }}\",
\"remote_repo\": \"${{ github.repository }}\"
\"storage_image_tag\": \"${{ needs.tag.outputs.build-tag }}\",
\"compute_image_tag\": \"${{ needs.tag.outputs.build-tag }}\"
}
}"

neon-image:
runs-on: [ self-hosted, gen3, large ]
runs-on: dev
needs: [ tag ]
container: gcr.io/kaniko-project/executor:v1.9.0-debug
# https://github.com/GoogleContainerTools/kaniko/issues/2005
container: gcr.io/kaniko-project/executor:v1.7.0-debug
defaults:
run:
shell: sh -eu {0}

steps:
- name: Checkout
@@ -545,19 +494,11 @@ jobs:
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json

- name: Kaniko build neon
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:$GITHUB_RUN_ID

# Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
- name: Cleanup ECR folder
run: rm -rf ~/.ecr

compute-tools-image:
runs-on: [ self-hosted, gen3, large ]
runs-on: dev
needs: [ tag ]
container: gcr.io/kaniko-project/executor:v1.9.0-debug
container: gcr.io/kaniko-project/executor:v1.7.0-debug
defaults:
run:
shell: sh -eu {0}

steps:
- name: Checkout
@@ -567,23 +508,11 @@ jobs:
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json

- name: Kaniko build compute tools
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-tools --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}}
run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --dockerfile Dockerfile.compute-tools --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:$GITHUB_RUN_ID

- name: Cleanup ECR folder
run: rm -rf ~/.ecr

compute-node-image:
runs-on: [ self-hosted, gen3, large ]
runs-on: dev
container: gcr.io/kaniko-project/executor:v1.7.0-debug
container: gcr.io/kaniko-project/executor:v1.9.0-debug
needs: [ tag ]
strategy:
fail-fast: false
matrix:
version: [ v14, v15 ]
defaults:
run:
shell: sh -eu {0}

steps:
- name: Checkout
uses: actions/checkout@v1 # v3 won't work with kaniko
@@ -594,136 +523,65 @@ jobs:
- name: Configure ECR login
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json

- name: Kaniko build compute node with extensions
# compute-node uses postgres 14, which is default now
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --build-arg PG_VERSION=${{ matrix.version }} --dockerfile Dockerfile.compute-node --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
# cloud repo depends on this image name, thus duplicating it
# remove compute-node when cloud repo is updated
- name: Kaniko build compute node with extensions v14 (compatibility)
run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --dockerfile Dockerfile.compute-node-v14 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:$GITHUB_RUN_ID

- name: Cleanup ECR folder
compute-node-image-v14:
run: rm -rf ~/.ecr
runs-on: dev
container: gcr.io/kaniko-project/executor:v1.9.0-debug
steps:
- name: Checkout
uses: actions/checkout@v1 # v3 won't work with kaniko
with:
submodules: true
fetch-depth: 0

vm-compute-node-image:
- name: Configure ECR login
runs-on: [ self-hosted, gen3, large ]
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
needs: [ tag, compute-node-image ]
- name: Kaniko build compute node with extensions v14
run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --dockerfile Dockerfile.compute-node-v14 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:$GITHUB_RUN_ID

compute-node-image-v15:
runs-on: dev
container: gcr.io/kaniko-project/executor:v1.9.0-debug
steps:
- name: Checkout
uses: actions/checkout@v1 # v3 won't work with kaniko
with:
submodules: true
fetch-depth: 0

- name: Configure ECR login
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json

- name: Kaniko build compute node with extensions v15
run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --dockerfile Dockerfile.compute-node-v15 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:$GITHUB_RUN_ID

promote-images:
runs-on: dev
needs: [ neon-image, compute-node-image, compute-node-image-v14, compute-tools-image ]
if: github.event_name != 'workflow_dispatch'
container: amazon/aws-cli
strategy:
fail-fast: false
matrix:
version: [ v14, v15 ]
# compute-node uses postgres 14, which is default now
defaults:
# cloud repo depends on this image name, thus duplicating it
run:
# remove compute-node when cloud repo is updated
shell: sh -eu {0}
name: [ neon, compute-node, compute-node-v14, compute-tools ]
env:
VM_INFORMANT_VERSION: 0.1.1

steps:
- name: Downloading latest vm-builder
- name: Promote image to latest
run: |
run:
curl -L https://github.com/neondatabase/neonvm/releases/latest/download/vm-builder -o vm-builder
MANIFEST=$(aws ecr batch-get-image --repository-name ${{ matrix.name }} --image-ids imageTag=$GITHUB_RUN_ID --query 'images[].imageManifest' --output text) && aws ecr put-image --repository-name ${{ matrix.name }} --image-tag latest --image-manifest "$MANIFEST"
chmod +x vm-builder

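The "Promote image to latest" variant above retags an existing image entirely server-side: it fetches the manifest of the image pushed under the run ID and re-registers the same manifest under the latest tag, so no layers are pulled or pushed. The same idea spelled out step by step; the repository name and tags here are illustrative, not taken from the workflow:

    MANIFEST=$(aws ecr batch-get-image --repository-name neon \
        --image-ids imageTag=4242 --query 'images[].imageManifest' --output text)
    aws ecr put-image --repository-name neon --image-tag latest --image-manifest "$MANIFEST"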
- name: Pulling compute-node image
run: |
docker pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}

- name: Downloading VM informant version ${{ env.VM_INFORMANT_VERSION }}
run: |
curl -fL https://github.com/neondatabase/autoscaling/releases/download/${{ env.VM_INFORMANT_VERSION }}/vm-informant -o vm-informant
chmod +x vm-informant

- name: Adding VM informant to compute-node image
run: |
ID=$(docker create 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}})
docker cp vm-informant $ID:/bin/vm-informant
docker commit $ID temp-vm-compute-node
docker rm -f $ID

- name: Build vm image
run: |
# note: as of 2023-01-12, vm-builder requires a trailing ":latest" for local images
./vm-builder -src=temp-vm-compute-node:latest -dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}

- name: Pushing vm-compute-node image
run: |
docker push 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}

test-images:
needs: [ tag, neon-image, compute-node-image, compute-tools-image ]
runs-on: [ self-hosted, gen3, small ]

steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0

# `neondatabase/neon` contains multiple binaries, all of them use the same input for the version into the same version formatting library.
# Pick pageserver as currently the only binary with extra "version" features printed in the string to verify.
# Regular pageserver version string looks like
# Neon page server git-env:32d14403bd6ab4f4520a94cbfd81a6acef7a526c failpoints: true, features: []
# Bad versions might loop like:
# Neon page server git-env:local failpoints: true, features: ["testing"]
# Ensure that we don't have bad versions.
- name: Verify image versions
shell: bash # ensure no set -e for better error messages
run: |
pageserver_version=$(docker run --rm 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} "/bin/sh" "-c" "/usr/local/bin/pageserver --version")

echo "Pageserver version string: $pageserver_version"

if ! echo "$pageserver_version" | grep -qv 'git-env:local' ; then
echo "Pageserver version should not be the default Dockerfile one"
exit 1
fi

if ! echo "$pageserver_version" | grep -qv '"testing"' ; then
echo "Pageserver version should have no testing feature enabled"
exit 1
fi

- name: Verify docker-compose example
run: env REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com TAG=${{needs.tag.outputs.build-tag}} ./docker-compose/docker_compose_test.sh

- name: Print logs and clean up
if: always()
run: |
docker compose -f ./docker-compose/docker-compose.yml logs || 0
docker compose -f ./docker-compose/docker-compose.yml down

promote-images:
runs-on: [ self-hosted, gen3, small ]
needs: [ tag, test-images, vm-compute-node-image ]
container: golang:1.19-bullseye
if: github.event_name != 'workflow_dispatch'

steps:
- name: Install Crane & ECR helper
if: |
(github.ref_name == 'main' || github.ref_name == 'release') &&
github.event_name != 'workflow_dispatch'
run: |
go install github.com/google/go-containerregistry/cmd/crane@31786c6cbb82d6ec4fb8eb79cd9387905130534e # v0.11.0
go install github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login@69c85dc22db6511932bbf119e1a0cc5c90c69a7f # v0.6.0
|
|
||||||
|
|
||||||
- name: Configure ECR login
|
|
||||||
run: |
|
|
||||||
mkdir /github/home/.docker/
|
|
||||||
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
|
||||||
|
|
||||||
- name: Add latest tag to images
|
|
||||||
if: |
|
|
||||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
|
||||||
github.event_name != 'workflow_dispatch'
|
|
||||||
run: |
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
|
|
||||||
- name: Cleanup ECR folder
|
|
||||||
run: rm -rf ~/.ecr
|
|
||||||
|
|
||||||
push-docker-hub:
|
push-docker-hub:
|
||||||
runs-on: [ self-hosted, dev, x64 ]
|
runs-on: dev
|
||||||
needs: [ promote-images, tag ]
|
needs: [ promote-images, tag ]
|
||||||
container: golang:1.19-bullseye
|
container: golang:1.19-bullseye
|
||||||
|
|
||||||
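The old promote-images job shown above retags an image entirely on the registry side: it fetches the manifest of the already-pushed image and re-puts it under the latest tag, so no layers are pulled or pushed. A minimal sketch of the same pattern outside the workflow, with a hypothetical repository name and tag standing in for the matrix values:

  # Re-tag an existing ECR image without pulling it (repository name and tag are placeholders)
  MANIFEST=$(aws ecr batch-get-image \
    --repository-name my-repo \
    --image-ids imageTag=abc123 \
    --query 'images[].imageManifest' --output text)
  aws ecr put-image --repository-name my-repo --image-tag latest --image-manifest "$MANIFEST"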
@@ -739,22 +597,16 @@ jobs:
           echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json

       - name: Pull neon image from ECR
-        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} neon
+        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:latest neon

       - name: Pull compute tools image from ECR
-        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} compute-tools
+        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest compute-tools
+
+      - name: Pull compute node image from ECR
+        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:latest compute-node

       - name: Pull compute node v14 image from ECR
-        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} compute-node-v14
-
-      - name: Pull vm compute node v14 image from ECR
-        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} vm-compute-node-v14
-
-      - name: Pull compute node v15 image from ECR
-        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} compute-node-v15
-
-      - name: Pull vm compute node v15 image from ECR
-        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} vm-compute-node-v15
+        run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:latest compute-node-v14

       - name: Pull rust image from ECR
         run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned rust
@@ -764,12 +616,9 @@ jobs:
           (github.ref_name == 'main' || github.ref_name == 'release') &&
           github.event_name != 'workflow_dispatch'
         run: |
-          crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/neon:latest
-          crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest
-          crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:latest
-          crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:latest
-          crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:latest
-          crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:latest
+          crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/neon:latest
+          crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-tools:latest
+          crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-node:latest

       - name: Configure Docker Hub login
         run: |
@@ -783,18 +632,12 @@ jobs:
       - name: Push compute tools image to Docker Hub
         run: crane push compute-tools neondatabase/compute-tools:${{needs.tag.outputs.build-tag}}
+
+      - name: Push compute node image to Docker Hub
+        run: crane push compute-node neondatabase/compute-node:${{needs.tag.outputs.build-tag}}

       - name: Push compute node v14 image to Docker Hub
         run: crane push compute-node-v14 neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}}
-
-      - name: Push vm compute node v14 image to Docker Hub
-        run: crane push vm-compute-node-v14 neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}}
-
-      - name: Push compute node v15 image to Docker Hub
-        run: crane push compute-node-v15 neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}}
-
-      - name: Push vm compute node v15 image to Docker Hub
-        run: crane push vm-compute-node-v15 neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}}

       - name: Push rust image to Docker Hub
         run: crane push rust neondatabase/rust:pinned
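The push-docker-hub job whose pull and push steps appear in the hunks above moves images between registries in two stages: crane pull saves the image to a local tarball and crane push uploads that tarball under the new name. The crane copy lines earlier in this diff do the same transfer in a single step without touching disk; a sketch with placeholder image names:

  # Two-stage form used by the push-docker-hub steps (tarball kept on disk in between)
  crane pull registry.example.com/neon:sometag neon
  crane push neon docker.io/neondatabase/neon:sometag

  # Single-step equivalent
  crane copy registry.example.com/neon:sometag docker.io/neondatabase/neon:sometag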
@@ -805,29 +648,46 @@ jobs:
         run: |
           crane tag neondatabase/neon:${{needs.tag.outputs.build-tag}} latest
           crane tag neondatabase/compute-tools:${{needs.tag.outputs.build-tag}} latest
+          crane tag neondatabase/compute-node:${{needs.tag.outputs.build-tag}} latest
           crane tag neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
-          crane tag neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
-          crane tag neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
-          crane tag neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
-
-      - name: Cleanup ECR folder
-        run: rm -rf ~/.ecr
-
-  deploy-pr-test-new:
-    runs-on: [ self-hosted, gen3, small ]
-    container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
+
+  calculate-deploy-targets:
+    runs-on: [ self-hosted, Linux, k8s-runner ]
+    if: |
+      (github.ref_name == 'main' || github.ref_name == 'release') &&
+      github.event_name != 'workflow_dispatch'
+    outputs:
+      matrix-include: ${{ steps.set-matrix.outputs.include }}
+    steps:
+      - id: set-matrix
+        run: |
+          if [[ "$GITHUB_REF_NAME" == "main" ]]; then
+            STAGING='{"env_name": "staging", "proxy_job": "neon-proxy", "proxy_config": "staging.proxy", "kubeconfig_secret": "STAGING_KUBECONFIG_DATA"}'
+            NEON_STRESS='{"env_name": "neon-stress", "proxy_job": "neon-stress-proxy", "proxy_config": "neon-stress.proxy", "kubeconfig_secret": "NEON_STRESS_KUBECONFIG_DATA"}'
+            echo "::set-output name=include::[$STAGING, $NEON_STRESS]"
+          elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
+            PRODUCTION='{"env_name": "production", "proxy_job": "neon-proxy", "proxy_config": "production.proxy", "kubeconfig_secret": "PRODUCTION_KUBECONFIG_DATA"}'
+            echo "::set-output name=include::[$PRODUCTION]"
+          else
+            echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
+            exit 1
+          fi
+
+  deploy:
+    runs-on: [ self-hosted, Linux, k8s-runner ]
+    #container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:latest
     # We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
     # If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
-    needs: [ push-docker-hub, tag, regress-tests ]
+    needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
     if: |
-      contains(github.event.pull_request.labels.*.name, 'deploy-test-storage') &&
+      (github.ref_name == 'main' || github.ref_name == 'release') &&
       github.event_name != 'workflow_dispatch'
     defaults:
       run:
         shell: bash
     strategy:
       matrix:
-        target_region: [ eu-west-1 ]
+        include: ${{fromJSON(needs.calculate-deploy-targets.outputs.matrix-include)}}
     steps:
       - name: Checkout
         uses: actions/checkout@v3
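The calculate-deploy-targets job added on one side of the hunk above publishes its target list as a JSON array through the ::set-output workflow command, and the downstream deploy jobs expand it into their matrix with fromJSON. ::set-output is deprecated on current runners in favor of writing to the GITHUB_OUTPUT file; a sketch of the equivalent step body, reusing one of the same target objects:

  # Same output as the set-matrix step, written via $GITHUB_OUTPUT instead of ::set-output
  STAGING='{"env_name": "staging", "proxy_job": "neon-proxy", "proxy_config": "staging.proxy", "kubeconfig_secret": "STAGING_KUBECONFIG_DATA"}'
  echo "include=[$STAGING]" >> "$GITHUB_OUTPUT"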
@@ -835,76 +695,78 @@ jobs:
           submodules: true
           fetch-depth: 0
+
+      - name: Setup python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+
+      - name: Setup ansible
+        run: |
+          export PATH="/root/.local/bin:$PATH"
+          pip install --progress-bar off --user ansible boto3

       - name: Redeploy
         run: |
           export DOCKER_TAG=${{needs.tag.outputs.build-tag}}
           cd "$(pwd)/.github/ansible"
-
-          ./get_binaries.sh
-
-          ansible-galaxy collection install sivel.toiletwater
-          ansible-playbook deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_STAGING_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
-          rm -f neon_install.tar.gz .neon_current_version
-
-      - name: Cleanup ansible folder
-        run: rm -rf ~/.ansible
-
-  deploy:
-    runs-on: [ self-hosted, gen3, small ]
-    container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
-    needs: [ push-docker-hub, tag, regress-tests ]
-    if: ( github.ref_name == 'main' || github.ref_name == 'release' ) && github.event_name != 'workflow_dispatch'
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          submodules: false
-          fetch-depth: 0
-
-      - name: Trigger deploy workflow
-        env:
-          GH_TOKEN: ${{ github.token }}
-        run: |
           if [[ "$GITHUB_REF_NAME" == "main" ]]; then
-            gh workflow run deploy-dev.yml --ref main -f branch=${{ github.sha }} -f dockerTag=${{needs.tag.outputs.build-tag}}
+            ./get_binaries.sh
           elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
-            gh workflow run deploy-prod.yml --ref release -f branch=${{ github.sha }} -f dockerTag=${{needs.tag.outputs.build-tag}} -f disclamerAcknowledged=true
+            RELEASE=true ./get_binaries.sh
           else
             echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
             exit 1
           fi

-  promote-compatibility-data:
-    runs-on: [ self-hosted, gen3, small ]
-    container:
-      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
-      options: --init
-    needs: [ push-docker-hub, tag, regress-tests ]
-    if: github.ref_name == 'release' && github.event_name != 'workflow_dispatch'
+          eval $(ssh-agent)
+          echo "${{ secrets.TELEPORT_SSH_KEY }}" | tr -d '\n'| base64 --decode >ssh-key
+          echo "${{ secrets.TELEPORT_SSH_CERT }}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub
+          chmod 0600 ssh-key
+          ssh-add ssh-key
+          rm -f ssh-key ssh-key-cert.pub
+
+          ansible-playbook deploy.yaml -i ${{ matrix.env_name }}.hosts
+          rm -f neon_install.tar.gz .neon_current_version
+
+  deploy-proxy:
+    runs-on: dev
+    container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:latest
+    # Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently.
+    needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
+    if: |
+      (github.ref_name == 'main' || github.ref_name == 'release') &&
+      github.event_name != 'workflow_dispatch'
+    defaults:
+      run:
+        shell: bash
+    strategy:
+      matrix:
+        include: ${{fromJSON(needs.calculate-deploy-targets.outputs.matrix-include)}}
+    env:
+      KUBECONFIG: .kubeconfig
     steps:
-      - name: Promote compatibility snapshot for the release
-        env:
-          BUCKET: neon-github-public-dev
-          PREFIX: artifacts/latest
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          submodules: true
+          fetch-depth: 0
+
+      - name: Add curl
+        run: apt update && apt install curl -y
+
+      - name: Store kubeconfig file
         run: |
-          # Update compatibility snapshot for the release
-          for build_type in debug release; do
-            OLD_FILENAME=compatibility-snapshot-${build_type}-pg14-${GITHUB_RUN_ID}.tar.zst
-            NEW_FILENAME=compatibility-snapshot-${build_type}-pg14.tar.zst
-
-            time aws s3 mv --only-show-errors s3://${BUCKET}/${PREFIX}/${OLD_FILENAME} s3://${BUCKET}/${PREFIX}/${NEW_FILENAME}
-          done
-
-          # Update Neon artifact for the release (reuse already uploaded artifact)
-          for build_type in debug release; do
-            OLD_PREFIX=artifacts/${GITHUB_RUN_ID}
-            FILENAME=neon-${{ runner.os }}-${build_type}-artifact.tar.zst
-
-            S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${OLD_PREFIX} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
-            if [ -z "${S3_KEY}" ]; then
-              echo 2>&1 "Neither s3://${BUCKET}/${OLD_PREFIX}/${FILENAME} nor its version from previous attempts exist"
-              exit 1
-            fi
-
-            time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} s3://${BUCKET}/${PREFIX}/${FILENAME}
-          done
+          echo "${{ secrets[matrix.kubeconfig_secret] }}" | base64 --decode > ${KUBECONFIG}
+          chmod 0600 ${KUBECONFIG}
+
+      - name: Setup helm v3
+        run: |
+          curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
+          helm repo add neondatabase https://neondatabase.github.io/helm-charts
+
+      - name: Re-deploy proxy
+        run: |
+          DOCKER_TAG=${{needs.tag.outputs.build-tag}}
+          helm upgrade ${{ matrix.proxy_job }} neondatabase/neon-proxy --namespace default --install -f .github/helm-values/${{ matrix.proxy_config }}.yaml --set image.tag=${DOCKER_TAG} --wait --timeout 15m0s
+          helm upgrade ${{ matrix.proxy_job }}-scram neondatabase/neon-proxy --namespace default --install -f .github/helm-values/${{ matrix.proxy_config }}-scram.yaml --set image.tag=${DOCKER_TAG} --wait --timeout 15m0s

.github/workflows/codestyle.yml (vendored, new file, 166 lines)
@@ -0,0 +1,166 @@
name: Check code style and build. Runs on pushes to main and on pull requests, with bash -euxo pipefail as the default shell, one concurrent run per non-main branch, and RUST_BACKTRACE: 1 / COPT: '-Werror' in the environment. Jobs:
  check-codestyle-rust ("check codestyle rust and postgres"): matrix os [ ubuntu-latest, macos-latest ] (both runner images ship rustup, so the toolchain comes from rust-toolchain.toml), 90-minute timeout; checkout with submodules, cargo fmt --all -- --check, install the Ubuntu or macOS postgres build dependencies, cache the postgres v14/v15 builds keyed on the vendor submodule revisions, make postgres-v14 / postgres-v15 / neon-pg-ext, cache cargo deps, ./run_clippy.sh, and cargo build --locked --all --all-targets.
  check-rust-dependencies: runs-on dev in the rust:pinned container; cargo hakari generate --diff and cargo hakari manage-deps --dry-run, so the workspace-hack crate stays in sync with every workspace member.
  check-codestyle-python: self-hosted k8s runner; cache the poetry virtualenvs, ./scripts/pysync, then poetry run isort / black / flake8 in check mode and poetry run mypy.
.github/workflows/deploy-dev.yml (vendored, 179 lines removed)
@@ -1,179 +0,0 @@
name: Neon Deploy dev. A manually triggered (workflow_dispatch) workflow with inputs dockerTag, branch (default main), deployStorage, deployProxy and deployStorageBroker, dev AWS credentials from secrets, and a deploy-dev concurrency group. Jobs:
  deploy-storage-new: ansible:pinned container, matrix target_region [ eu-west-1, us-east-2 ], environment dev-<region>; checks out the requested branch, runs ./get_binaries.sh, ansible-galaxy collection install sivel.toiletwater and ansible-playbook deploy.yaml -i staging.<region>.hosts.yaml with the staging console API token and Sentry URLs, then removes the install tarball and ~/.ansible.
  deploy-proxy-new: per-cluster matrix (dev-us-east-2-beta with link and legacy scram proxies, dev-eu-west-1-zeta without); assumes the github-runner IAM role, runs aws eks update-kubeconfig, then helm upgrade neon-proxy-scram (plus neon-proxy-link and neon-proxy-scram-legacy where enabled) with image.tag set to the requested dockerTag and the proxy Sentry URL.
  deploy-storage-broker-new: same two clusters; helm upgrade neon-storage-broker-lb with image.tag set to the requested dockerTag and the broker Sentry URL.

.github/workflows/deploy-prod.yml (vendored, 240 lines removed)
@@ -1,240 +0,0 @@
name: Neon Deploy prod. The production counterpart of the dev deploy workflow, with the same workflow_dispatch inputs plus disclamerAcknowledged ("I confirm that there is an emergency and I can not use regular release workflow") and a deploy-prod concurrency group. Jobs:
  deploy-prod-new: ansible container, matrix target_region [ us-east-2, us-west-2, eu-central-1, ap-southeast-1 ]; ./get_binaries.sh and ansible-playbook deploy.yaml -i prod.<region>.hosts.yaml with the production console API token and Sentry URLs.
  deploy-proxy-prod-new: per-cluster matrix (prod-us-east-2-delta, prod-us-west-2-eta, prod-eu-central-1-gamma, prod-ap-southeast-1-epsilon); helm upgrade neon-proxy-scram and, where enabled, neon-proxy-link and neon-proxy-scram-legacy.
  deploy-storage-broker-prod-new: same four clusters; helm upgrade neon-storage-broker-lb.
  deploy: legacy deploy to the old account; loads the Teleport SSH key and certificate from secrets, runs ANSIBLE_CONFIG=./ansible.cfg ansible-playbook deploy.yaml -i production.hosts.yaml, then cleans up ~/.ansible.
  deploy-storage-broker ("deploy storage broker on old staging and old prod"): kubeconfig from PRODUCTION_KUBECONFIG_DATA, helm upgrade neon-storage-broker from the neondatabase helm chart repository.

.github/workflows/neon_extra_builds.yml (vendored, 154 lines removed)
@@ -1,154 +0,0 @@
name: Check neon with extra platform builds. Runs on pushes to main and on pull requests, with the macOS build gated on the run-extra-build-macos label and the build-stats job on the run-extra-build-stats label. Jobs:
  check-macos-build: macos-latest, release build; brew install flex bison openssl protobuf, cache the postgres v14/v15 builds (keyed on the vendor submodule revisions) and the cargo deps, make postgres-v14 / postgres-v15 / neon-pg-ext, cargo build --all --release, ./run_clippy.sh.
  gather-rust-build-stats: self-hosted gen3 large runner in the rust:pinned container, release build with the cachepot wrapper and incremental compilation disabled; make postgres-headers, cargo build --all --release --timings, upload target/cargo-timings/cargo-timing.html to the neon-github-public-dev S3 bucket and publish the report URL as a "Build stats (release)" commit status.
.github/workflows/pg_clients.yml (vendored, 6 changed lines)
@@ -23,7 +23,6 @@ jobs:
     runs-on: [ ubuntu-latest ]

     env:
-      DEFAULT_PG_VERSION: 14
       TEST_OUTPUT: /tmp/test_output

     steps:
@@ -52,8 +51,8 @@ jobs:
         id: create-neon-project
         uses: ./.github/actions/neon-project-create
         with:
+          environment: staging
           api_key: ${{ secrets.NEON_STAGING_API_KEY }}
-          postgres_version: ${{ env.DEFAULT_PG_VERSION }}

       - name: Run pytest
         env:
@@ -64,7 +63,7 @@ jobs:
         run: |
           # Test framework expects we have psql binary;
           # but since we don't really need it in this test, let's mock it
-          mkdir -p "$POSTGRES_DISTRIB_DIR/v${DEFAULT_PG_VERSION}/bin" && touch "$POSTGRES_DISTRIB_DIR/v${DEFAULT_PG_VERSION}/bin/psql";
+          mkdir -p "$POSTGRES_DISTRIB_DIR/v14/bin" && touch "$POSTGRES_DISTRIB_DIR/v14/bin/psql";
           ./scripts/pytest \
             --junitxml=$TEST_OUTPUT/junit.xml \
             --tb=short \
@@ -76,6 +75,7 @@ jobs:
         if: ${{ always() }}
         uses: ./.github/actions/neon-project-delete
         with:
+          environment: staging
           project_id: ${{ steps.create-neon-project.outputs.project_id }}
           api_key: ${{ secrets.NEON_STAGING_API_KEY }}

.github/workflows/release.yml (vendored, 33 lines removed)
@@ -1,33 +0,0 @@
name: Create Release Branch

on:
  schedule:
    - cron: '0 10 * * 2'

jobs:
  create_release_branch:
    runs-on: [ubuntu-latest]

    steps:
      - name: Check out code
        uses: actions/checkout@v3
        with:
          ref: main

      - name: Get current date
        id: date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Create release branch
        run: git checkout -b releases/${{ steps.date.outputs.date }}

      - name: Push new branch
        run: git push origin releases/${{ steps.date.outputs.date }}

      - name: Create pull request into release
        uses: thomaseizinger/create-pull-request@e3972219c86a56550fb70708d96800d8e24ba862 # 1.3.0
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          head: releases/${{ steps.date.outputs.date }}
          base: release
          title: Release ${{ steps.date.outputs.date }}

.gitmodules (vendored, 2 changed lines)
@@ -1,7 +1,7 @@
 [submodule "vendor/postgres-v14"]
 	path = vendor/postgres-v14
 	url = https://github.com/neondatabase/postgres.git
-	branch = REL_14_STABLE_neon
+	branch = main
 [submodule "vendor/postgres-v15"]
 	path = vendor/postgres-v15
 	url = https://github.com/neondatabase/postgres.git

CODEOWNERS (11 lines removed)
@@ -1,11 +0,0 @@
/compute_tools/ @neondatabase/control-plane
/control_plane/ @neondatabase/compute @neondatabase/storage
/libs/pageserver_api/ @neondatabase/compute @neondatabase/storage
/libs/postgres_ffi/ @neondatabase/compute
/libs/remote_storage/ @neondatabase/storage
/libs/safekeeper_api/ @neondatabase/safekeepers
/pageserver/ @neondatabase/compute @neondatabase/storage
/pgxn/ @neondatabase/compute
/proxy/ @neondatabase/control-plane
/safekeeper/ @neondatabase/safekeepers
/vendor/ @neondatabase/compute

Cargo.lock (generated, 3070 changed lines)
File diff suppressed because it is too large.
157  Cargo.toml
@@ -5,166 +5,15 @@ members = [
     "pageserver",
     "proxy",
     "safekeeper",
-    "storage_broker",
     "workspace_hack",
-    "trace",
     "libs/*",
 ]
 
-[workspace.package]
-edition = "2021"
-license = "Apache-2.0"
-
-## All dependency versions, used in the project
-[workspace.dependencies]
-anyhow = { version = "1.0", features = ["backtrace"] }
-async-stream = "0.3"
-async-trait = "0.1"
-atty = "0.2.14"
-aws-config = { version = "0.51.0", default-features = false, features=["rustls"] }
-aws-sdk-s3 = "0.21.0"
-aws-smithy-http = "0.51.0"
-aws-types = "0.51.0"
-base64 = "0.13.0"
-bincode = "1.3"
-bindgen = "0.61"
-bstr = "1.0"
-byteorder = "1.4"
-bytes = "1.0"
-chrono = { version = "0.4", default-features = false, features = ["clock"] }
-clap = { version = "4.0", features = ["derive"] }
-close_fds = "0.3.2"
-comfy-table = "6.1"
-const_format = "0.2"
-crc32c = "0.6"
-crossbeam-utils = "0.8.5"
-enum-map = "2.4.2"
-enumset = "1.0.12"
-fail = "0.5.0"
-fs2 = "0.4.3"
-futures = "0.3"
-futures-core = "0.3"
-futures-util = "0.3"
-git-version = "0.3"
-hashbrown = "0.13"
-hashlink = "0.8.1"
-hex = "0.4"
-hex-literal = "0.3"
-hmac = "0.12.1"
-hostname = "0.3.1"
-humantime = "2.1"
-humantime-serde = "1.1.1"
-hyper = "0.14"
-hyper-tungstenite = "0.9"
-itertools = "0.10"
-jsonwebtoken = "8"
-libc = "0.2"
-md5 = "0.7.0"
-memoffset = "0.8"
-nix = "0.26"
-notify = "5.0.0"
-num-traits = "0.2.15"
-once_cell = "1.13"
-opentelemetry = "0.18.0"
-opentelemetry-otlp = { version = "0.11.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
-opentelemetry-semantic-conventions = "0.10.0"
-tracing-opentelemetry = "0.18.0"
-parking_lot = "0.12"
-pin-project-lite = "0.2"
-prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency
-prost = "0.11"
-rand = "0.8"
-regex = "1.4"
-reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
-routerify = "3"
-rpds = "0.12.0"
-rustls = "0.20"
-rustls-pemfile = "1"
-rustls-split = "0.3"
-scopeguard = "1.1"
-sentry = { version = "0.29", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1"
-serde_with = "2.0"
-sha2 = "0.10.2"
-signal-hook = "0.3"
-socket2 = "0.4.4"
-strum = "0.24"
-strum_macros = "0.24"
-svg_fmt = "0.4.1"
-tar = "0.4"
-thiserror = "1.0"
-tls-listener = { version = "0.6", features = ["rustls", "hyper-h1"] }
-tokio = { version = "1.17", features = ["macros"] }
-tokio-postgres-rustls = "0.9.0"
-tokio-rustls = "0.23"
-tokio-stream = "0.1"
-tokio-util = { version = "0.7", features = ["io"] }
-toml = "0.5"
-toml_edit = { version = "0.17", features = ["easy"] }
-tonic = {version = "0.8", features = ["tls", "tls-roots"]}
-tracing = "0.1"
-tracing-subscriber = { version = "0.3", features = ["env-filter"] }
-url = "2.2"
-uuid = { version = "1.2", features = ["v4", "serde"] }
-walkdir = "2.3.2"
-webpki-roots = "0.22.5"
-x509-parser = "0.14"
-
-## TODO replace this with tracing
-env_logger = "0.10"
-log = "0.4"
-
-## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed
-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
-postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
-postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
-tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
-tokio-tar = { git = "https://github.com/neondatabase/tokio-tar.git", rev="404df61437de0feef49ba2ccdbdd94eb8ad6e142" }
-
-## Other git libraries
-heapless = { default-features=false, features=[], git = "https://github.com/japaric/heapless.git", rev = "644653bf3b831c6bb4963be2de24804acf5e5001" } # upstream release pending
-
-## Local libraries
-consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
-metrics = { version = "0.1", path = "./libs/metrics/" }
-pageserver_api = { version = "0.1", path = "./libs/pageserver_api/" }
-postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" }
-postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" }
-pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
-remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
-safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
-storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
-tenant_size_model = { version = "0.1", path = "./libs/tenant_size_model/" }
-tracing-utils = { version = "0.1", path = "./libs/tracing-utils/" }
-utils = { version = "0.1", path = "./libs/utils/" }
-
-## Common library dependency
-workspace_hack = { version = "0.1", path = "./workspace_hack/" }
-
-## Build dependencies
-criterion = "0.4"
-rcgen = "0.10"
-rstest = "0.16"
-tempfile = "3.2"
-tonic-build = "0.8"
-
-# This is only needed for proxy's tests.
-# TODO: we should probably fork `tokio-postgres-rustls` instead.
-[patch.crates-io]
-tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
-
-################# Binary contents sections
-
 [profile.release]
 # This is useful for profiling and, to some extent, debug.
 # Besides, debug info should not affect the performance.
 debug = true
 
-# disable debug symbols for all packages except this one to decrease binaries size
-[profile.release.package."*"]
-debug = false
-
 [profile.release-line-debug]
 inherits = "release"
 debug = 1 # true = 2 = all symbols, 1 = line only
@@ -216,3 +65,9 @@ inherits = "release"
 debug = false # true = 2 = all symbols, 1 = line only
 opt-level = "z"
 lto = true
+
+
+# This is only needed for proxy's tests.
+# TODO: we should probably fork `tokio-postgres-rustls` instead.
+[patch.crates-io]
+tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
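The `[workspace.dependencies]` table on the removed side of this hunk centralizes version and feature choices at the workspace root; member crates then opt in with `workspace = true` instead of repeating versions. The member manifests themselves are not part of this diff, so the following is only an illustrative sketch of that mechanism (the crate and dependency names are reused from the lines above):

    # <member crate>/Cargo.toml -- illustrative sketch, not taken from this diff
    [package]
    name = "pageserver"
    edition.workspace = true       # inherited from [workspace.package]
    license.workspace = true

    [dependencies]
    anyhow.workspace = true        # version and features come from [workspace.dependencies]
    tokio.workspace = true
    utils.workspace = true         # local path dependency shared the same way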
14  Dockerfile
@@ -44,7 +44,7 @@ COPY . .
 # Show build caching stats to check if it was used in the end.
 # Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
 RUN set -e \
-    && mold -run cargo build --bin pageserver --bin pageserver_binutils --bin draw_timeline_dir --bin safekeeper --bin storage_broker --bin proxy --locked --release \
+    && mold -run cargo build --bin pageserver --bin safekeeper --bin proxy --locked --release \
     && cachepot -s
 
 # Build final image
@@ -63,12 +63,9 @@ RUN set -e \
     && useradd -d /data neon \
     && chown -R neon:neon /data
 
 COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver /usr/local/bin
-COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver_binutils /usr/local/bin
-COPY --from=build --chown=neon:neon /home/nonroot/target/release/draw_timeline_dir /usr/local/bin
 COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
-COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_broker /usr/local/bin
 COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin
 
 COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/v14/
 COPY --from=pg-build /home/nonroot/pg_install/v15 /usr/local/v15/
@@ -79,7 +76,7 @@ COPY --from=pg-build /home/nonroot/postgres_install.tar.gz /data/
 RUN mkdir -p /data/.neon/ && chown -R neon:neon /data/.neon/ \
     && /usr/local/bin/pageserver -D /data/.neon/ --init \
     -c "id=1234" \
-    -c "broker_endpoint='http://storage_broker:50051'" \
+    -c "broker_endpoints=['http://etcd:2379']" \
     -c "pg_distrib_dir='/usr/local/'" \
     -c "listen_pg_addr='0.0.0.0:6400'" \
     -c "listen_http_addr='0.0.0.0:9898'"
@@ -88,3 +85,4 @@ VOLUME ["/data"]
 USER neon
 EXPOSE 6400
 EXPOSE 9898
+CMD ["/bin/bash"]
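The `-c` overrides passed to `pageserver --init` above seed the pageserver's initial configuration; the only setting that differs between the two sides is the broker endpoint (the `storage_broker` gRPC service versus an etcd cluster). Assuming `--init` writes these overrides into the config file more or less verbatim (the file layout is an assumption; the keys and values are exactly the ones shown above), the result would look roughly like:

    # pageserver config seeded by --init above (layout assumed; values from the diff)
    id = 1234
    broker_endpoint = 'http://storage_broker:50051'    # one side of the diff
    # broker_endpoints = ['http://etcd:2379']          # the other side of the diff
    pg_distrib_dir = '/usr/local/'
    listen_pg_addr = '0.0.0.0:6400'
    listen_http_addr = '0.0.0.0:9898'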
@@ -1,258 +0,0 @@
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
ARG IMAGE=rust
ARG TAG=pinned

#########################################################################################
#
# Layer "build-deps"
#
#########################################################################################
FROM debian:bullseye-slim AS build-deps
RUN apt update && \
    apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
    zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev \
    libicu-dev

#########################################################################################
#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
#########################################################################################
FROM build-deps AS pg-build
ARG PG_VERSION
COPY vendor/postgres-${PG_VERSION} postgres
RUN cd postgres && \
    ./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp --with-icu && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
    # Install headers
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \
    # Enable some of contrib extensions
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control

#########################################################################################
#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
#########################################################################################
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y cmake gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
    libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
    libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
    protobuf-c-compiler xsltproc

RUN wget https://gitlab.com/Oslandia/SFCGAL/-/archive/v1.3.10/SFCGAL-v1.3.10.tar.gz && \
    tar zxvf SFCGAL-v1.3.10.tar.gz && \
    cd SFCGAL-v1.3.10 && cmake . && make -j $(getconf _NPROCESSORS_ONLN) && \
    DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
    make clean && cp -R /sfcgal/* /

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.1.tar.gz && \
    tar xvzf postgis-3.3.1.tar.gz && \
    cd postgis-3.3.1 && \
    ./autogen.sh && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    ./configure --with-sfcgal=/usr/local/bin/sfcgal-config && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    cd extensions/postgis && \
    make clean && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_sfcgal.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control

#########################################################################################
#
# Layer "plv8-build"
# Build plv8
#
#########################################################################################
FROM build-deps AS plv8-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5 binutils

# https://github.com/plv8/plv8/issues/475:
# v8 uses gold for linking and sets `--thread-count=4` which breaks
# gold version <= 1.35 (https://sourceware.org/bugzilla/show_bug.cgi?id=23607)
# Install newer gold version manually as debian-testing binutils version updates
# libc version, which in turn breaks other extension built against non-testing libc.
RUN wget https://ftp.gnu.org/gnu/binutils/binutils-2.38.tar.gz && \
    tar xvzf binutils-2.38.tar.gz && \
    cd binutils-2.38 && \
    cd libiberty && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && \
    cd ../bfd && ./configure && make bfdver.h && \
    cd ../gold && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && make install && \
    cp /usr/local/bin/ld.gold /usr/bin/gold

# Sed is used to patch for https://github.com/plv8/plv8/issues/503
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
    tar xvzf v3.1.4.tar.gz && \
    cd plv8-3.1.4 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
    make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
    rm -rf /plv8-* && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control

#########################################################################################
#
# Layer "h3-pg-build"
# Build h3_pg
#
#########################################################################################
FROM build-deps AS h3-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

# packaged cmake is too old
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
    -q -O /tmp/cmake-install.sh \
    && chmod u+x /tmp/cmake-install.sh \
    && /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
    && rm /tmp/cmake-install.sh

RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
    tar xvzf h3.tgz && \
    cd h3-4.0.1 && \
    mkdir build && \
    cd build && \
    cmake .. -DCMAKE_BUILD_TYPE=Release && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    DESTDIR=/h3 make install && \
    cp -R /h3/usr / && \
    rm -rf build

RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
    tar xvzf h3-pg.tgz && \
    cd h3-pg-4.0.1 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control

#########################################################################################
#
# Layer "unit-pg-build"
# compile unit extension
#
#########################################################################################
FROM build-deps AS unit-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz && \
    tar xvzf 7.7.tar.gz && \
    cd postgresql-unit-7.7 && \
    make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
    make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
    # unit extension's "create extension" script relies on absolute install path to fill some reference tables.
    # We move the extension from '/usr/local/pgsql/' to '/usr/local/' after it is build. So we need to adjust the path.
    # This one-liner removes pgsql/ part of the path.
    # NOTE: Other extensions that rely on MODULEDIR variable after building phase will need the same fix.
    find /usr/local/pgsql/share/extension/ -name "unit*.sql" -print0 | xargs -0 sed -i "s|pgsql/||g" && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/unit.control

#########################################################################################
#
# Layer "neon-pg-ext-build"
# compile neon extensions
#
#########################################################################################
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=postgis-build /sfcgal/* /
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY --from=unit-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY pgxn/ pgxn/

RUN make -j $(getconf _NPROCESSORS_ONLN) \
    PG_CONFIG=/usr/local/pgsql/bin/pg_config \
    -C pgxn/neon \
    -s install

#########################################################################################
#
# Compile and run the Neon-specific `compute_ctl` binary
#
#########################################################################################
FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto

#########################################################################################
#
# Clean up postgres folder before inclusion
#
#########################################################################################
FROM neon-pg-ext-build AS postgres-cleanup-layer
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql

# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp

# Remove headers that we won't need anymore - we've completed installation of all extensions
RUN rm -r /usr/local/pgsql/include

# Remove static postgresql libraries - all compilation is finished, so we
# can now remove these files - they must be included in other binaries by now
# if they were to be used by other libraries.
RUN rm /usr/local/pgsql/lib/lib*.a

#########################################################################################
#
# Final layer
# Put it all together into the final image
#
#########################################################################################
FROM debian:bullseye-slim
# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
    echo "postgres:test_console_pass" | chpasswd && \
    mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
    chown -R postgres:postgres /var/db/postgres && \
    chmod 0750 /var/db/postgres/compute && \
    echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
# libreadline8 for psql
# libicu67, locales for collations (including ICU)
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libsfcgal1, libproj and libprotobuf-c1 for PostGIS
RUN apt update && \
    apt install --no-install-recommends -y \
    locales \
    libicu67 \
    libreadline8 \
    libossp-uuid16 \
    libgeos-c1v5 \
    libgdal28 \
    libproj19 \
    libprotobuf-c1 \
    libsfcgal1 \
    gdb && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8

ENV LANG en_US.utf8
USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
200  Dockerfile.compute-node-v14  Normal file
@@ -0,0 +1,200 @@
ARG TAG=pinned
# apparently, ARGs don't get replaced in RUN commands in kaniko
# ARG POSTGIS_VERSION=3.3.0
# ARG PLV8_VERSION=3.1.4
# ARG PG_VERSION=v14

#
# Layer "build-deps"
#
FROM debian:bullseye-slim AS build-deps
RUN echo "deb http://ftp.debian.org/debian testing main" >> /etc/apt/sources.list && \
    echo "APT::Default-Release \"stable\";" > /etc/apt/apt.conf.d/default-release && \
    apt update
RUN apt update && \
    apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
    libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libglib2.0-dev

#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
FROM build-deps AS pg-build
COPY vendor/postgres-v14 postgres
RUN cd postgres && \
    ./configure CFLAGS='-O2 -g3' --enable-debug --with-uuid=ossp && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
    # Install headers
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install

#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
# PostGIS compiles against neon postgres sources without changes. Perhaps we
# could even use the upstream binaries, compiled against vanilla Postgres, but
# it would require some investigation to check that it works, and also keeps
# working in the future. So for now, we compile our own binaries.
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.0.tar.gz && \
    tar xvzf postgis-3.3.0.tar.gz && \
    cd postgis-3.3.0 && \
    ./autogen.sh && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    ./configure && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    cd extensions/postgis && \
    make clean && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control

#
# Layer "plv8-build"
# Build plv8
#
FROM build-deps AS plv8-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5

# https://github.com/plv8/plv8/issues/475
# Debian bullseye provides binutils 2.35 when >= 2.38 is necessary
RUN apt update && \
    apt install -y --no-install-recommends -t testing binutils

RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
    tar xvzf v3.1.4.tar.gz && \
    cd plv8-3.1.4 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    rm -rf /plv8-* && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control

#
# Layer "h3-pg-build"
# Build h3_pg
#
FROM build-deps AS h3-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

# packaged cmake is too old
RUN apt update && \
    apt install -y --no-install-recommends -t testing cmake

RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
    tar xvzf h3.tgz && \
    cd h3-4.0.1 && \
    mkdir build && \
    cd build && \
    cmake .. -DCMAKE_BUILD_TYPE=Release && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    DESTDIR=/h3 make install && \
    cp -R /h3/usr / && \
    rm -rf build

RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
    tar xvzf h3-pg.tgz && \
    cd h3-pg-4.0.1 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control

#
# Layer "neon-pg-ext-build"
# compile neon extensions
#
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
# plv8 still sometimes crashes during the creation
# COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY pgxn/ pgxn/

RUN make -j $(getconf _NPROCESSORS_ONLN) \
    PG_CONFIG=/usr/local/pgsql/bin/pg_config \
    -C pgxn/neon \
    -s install

# Compile and run the Neon-specific `compute_ctl` binary
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto

#
# Clean up postgres folder before inclusion
#
FROM neon-pg-ext-build AS postgres-cleanup-layer
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql

# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp

# Remove headers that we won't need anymore - we've completed installation of all extensions
RUN rm -r /usr/local/pgsql/include

# Remove now-useless PGXS src infrastructure
RUN rm -r /usr/local/pgsql/lib/pgxs/src

# Remove static postgresql libraries - all compilation is finished, so we
# can now remove these files - they must be included in other binaries by now
# if they were to be used by other libraries.
RUN rm /usr/local/pgsql/lib/lib*.a

#
# Final layer
# Put it all together into the final image
#
FROM debian:bullseye-slim
# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
    echo "postgres:test_console_pass" | chpasswd && \
    mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
    chown -R postgres:postgres /var/db/postgres && \
    chmod 0750 /var/db/postgres/compute && \
    echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
# libreadline8 for psql
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
# GLIBC 2.34 for plv8.
# Debian bullseye provides GLIBC 2.31, so we install the library from testing
#
# Lastly, link compute_ctl into zenith_ctl while we're at it,
# so that we don't need to put this in another layer.
RUN apt update && \
    apt install --no-install-recommends -y \
    libreadline8 \
    libossp-uuid16 \
    libgeos-c1v5 \
    libgdal28 \
    libproj19 \
    libprotobuf-c1 && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    echo "Installing GLIBC 2.34" && \
    echo "deb http://ftp.debian.org/debian testing main" >> /etc/apt/sources.list && \
    echo "APT::Default-Release \"stable\";" > /etc/apt/apt.conf.d/default-release && \
    apt update && \
    apt install -y --no-install-recommends -t testing libc6 && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl

USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
172  Dockerfile.compute-node-v15  Normal file
@@ -0,0 +1,172 @@
#
# This file is identical to the Dockerfile.compute-node-v14 file
# except for the version of Postgres that is built.
#

ARG TAG=pinned
# apparently, ARGs don't get replaced in RUN commands in kaniko
# ARG POSTGIS_VERSION=3.3.0
# ARG PLV8_VERSION=3.1.4
# ARG PG_VERSION=v15

#
# Layer "build-deps"
#
FROM debian:bullseye-slim AS build-deps
RUN apt update && \
    apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
    libcurl4-openssl-dev libossp-uuid-dev

#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
FROM build-deps AS pg-build
COPY vendor/postgres-v15 postgres
RUN cd postgres && \
    ./configure CFLAGS='-O2 -g3' --enable-debug --with-uuid=ossp && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
    # Install headers
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install

#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
# PostGIS compiles against neon postgres sources without changes. Perhaps we
# could even use the upstream binaries, compiled against vanilla Postgres, but
# it would require some investigation to check that it works, and also keeps
# working in the future. So for now, we compile our own binaries.
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc wget

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.0.tar.gz && \
    tar xvzf postgis-3.3.0.tar.gz && \
    cd postgis-3.3.0 && \
    ./autogen.sh && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    ./configure && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    cd extensions/postgis && \
    make clean && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control

#
# Layer "plv8-build"
# Build plv8
#
FROM build-deps AS plv8-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y git curl wget make ninja-build build-essential libncurses5 python3-dev pkg-config libc++-dev libc++abi-dev libglib2.0-dev

# https://github.com/plv8/plv8/issues/475
# Debian bullseye provides binutils 2.35 when >= 2.38 is necessary
RUN echo "deb http://ftp.debian.org/debian testing main" >> /etc/apt/sources.list && \
    echo "APT::Default-Release \"stable\";" > /etc/apt/apt.conf.d/default-release && \
    apt update && \
    apt install -y --no-install-recommends -t testing binutils

RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
    tar xvzf v3.1.4.tar.gz && \
    cd plv8-3.1.4 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    rm -rf /plv8-* && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control

#
# Layer "neon-pg-ext-build"
# compile neon extensions
#
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
COPY pgxn/ pgxn/

RUN make -j $(getconf _NPROCESSORS_ONLN) \
    PG_CONFIG=/usr/local/pgsql/bin/pg_config \
    -C pgxn/neon \
    -s install

# Compile and run the Neon-specific `compute_ctl` binary
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto

#
# Clean up postgres folder before inclusion
#
FROM neon-pg-ext-build AS postgres-cleanup-layer
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql

# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp

# Remove headers that we won't need anymore - we've completed installation of all extensions
RUN rm -r /usr/local/pgsql/include

# Remove now-useless PGXS src infrastructure
RUN rm -r /usr/local/pgsql/lib/pgxs/src

# Remove static postgresql libraries - all compilation is finished, so we
# can now remove these files - they must be included in other binaries by now
# if they were to be used by other libraries.
RUN rm /usr/local/pgsql/lib/lib*.a

#
# Final layer
# Put it all together into the final image
#
FROM debian:bullseye-slim
# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
    echo "postgres:test_console_pass" | chpasswd && \
    mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
    chown -R postgres:postgres /var/db/postgres && \
    chmod 0750 /var/db/postgres/compute && \
    echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

# TODO: Check if we can make the extension setup more modular versus a linear build
# currently plv8-build copies the output /usr/local/pgsql from postgis-build, etc#
COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
# libreadline8 for psql
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
# GLIBC 2.34 for plv8.
# Debian bullseye provides GLIBC 2.31, so we install the library from testing
#
# Lastly, link compute_ctl into zenith_ctl while we're at it,
# so that we don't need to put this in another layer.
RUN apt update && \
    apt install --no-install-recommends -y \
    libreadline8 \
    libossp-uuid16 \
    libgeos-c1v5 \
    libgdal28 \
    libproj19 \
    libprotobuf-c1 && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    echo "Installing GLIBC 2.34" && \
    echo "deb http://ftp.debian.org/debian testing main" >> /etc/apt/sources.list && \
    echo "APT::Default-Release \"stable\";" > /etc/apt/apt.conf.d/default-release && \
    apt update && \
    apt install -y --no-install-recommends -t testing libc6 && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl

USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
88  Dockerfile.compute-node.legacy  Normal file
@@ -0,0 +1,88 @@
#
# Legacy version of the Dockerfile for the compute node.
# Used by e2e CI. Building Dockerfile.compute-node will take
# unreasonable ammount of time without v2 runners.
#
# TODO: remove once cloud repo CI is moved to v2 runners.
#

# Allow specifiyng different compute-tools tag and image repo, so we are
# able to use different images
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
ARG IMAGE=compute-tools
ARG TAG=latest

#
# Image with pre-built tools
#
FROM $REPOSITORY/$IMAGE:$TAG AS compute-deps
# Only to get ready compute_ctl binary as deppendency

#
# Image with Postgres build deps
#
FROM debian:bullseye-slim AS build-deps

RUN apt-get update && apt-get -yq install automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
    libcurl4-openssl-dev libossp-uuid-dev

#
# Image with built Postgres
#
FROM build-deps AS pg-build

# Add user postgres
RUN adduser postgres
RUN mkdir /pg && chown postgres:postgres /pg

# Copy source files
# version 14 is default for now
COPY ./vendor/postgres-v14 /pg/
COPY ./pgxn /pg/

# Build and install Postgres locally
RUN mkdir /pg/compute_build && cd /pg/compute_build && \
    ../configure CFLAGS='-O2 -g3' --prefix=$(pwd)/postgres_bin --enable-debug --with-uuid=ossp && \
    # Install main binaries and contribs
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
    # Install headers
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install

# Install neon contrib
RUN make MAKELEVEL=0 PG_CONFIG=/pg/compute_build/postgres_bin/bin/pg_config -j $(getconf _NPROCESSORS_ONLN) -C /pg/neon install

USER postgres
WORKDIR /pg

#
# Final compute node image to be exported
#
FROM debian:bullseye-slim

# libreadline-dev is required to run psql
RUN apt-get update && apt-get -yq install libreadline-dev libossp-uuid-dev

# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
    echo "postgres:test_console_pass" | chpasswd && \
    mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
    chown -R postgres:postgres /var/db/postgres && \
    chmod 0750 /var/db/postgres/compute

# Copy ready Postgres binaries
COPY --from=pg-build /pg/compute_build/postgres_bin /usr/local

# Copy binaries from compute-tools
COPY --from=compute-deps /usr/local/bin/compute_ctl /usr/local/bin/compute_ctl

# XXX: temporary symlink for compatibility with old control-plane
RUN ln -s /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl

# Add postgres shared objects to the search path
RUN echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

USER postgres

ENTRYPOINT ["/usr/local/bin/compute_ctl"]
207  Makefile
@@ -20,18 +20,18 @@ else
 $(error Bad build type '$(BUILD_TYPE)', see Makefile for options)
 endif
 
+# Seccomp BPF is only available for Linux
 UNAME_S := $(shell uname -s)
 ifeq ($(UNAME_S),Linux)
-	# Seccomp BPF is only available for Linux
 	PG_CONFIGURE_OPTS += --with-libseccomp
-else ifeq ($(UNAME_S),Darwin)
-	# macOS with brew-installed openssl requires explicit paths
-	# It can be configured with OPENSSL_PREFIX variable
-	OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3)
-	PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib
-	# macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure
-	# brew formulae are keg-only and not symlinked into HOMEBREW_PREFIX, force their usage
-	EXTRA_PATH_OVERRIDES += $(shell brew --prefix bison)/bin/:$(shell brew --prefix flex)/bin/:
+endif
+
+# macOS with brew-installed openssl requires explicit paths
+# It can be configured with OPENSSL_PREFIX variable
+UNAME_S := $(shell uname -s)
+ifeq ($(UNAME_S),Darwin)
+	OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3)
+	PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib
 endif
 
 # Use -C option so that when PostgreSQL "make install" installs the
@@ -61,115 +61,130 @@ all: neon postgres neon-pg-ext
 #
 # The 'postgres_ffi' depends on the Postgres headers.
 .PHONY: neon
-neon: postgres-headers
+neon: postgres-v14-headers postgres-v15-headers
 	+@echo "Compiling Neon"
 	$(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS)
 
 ### PostgreSQL parts
-# Some rules are duplicated for Postgres v14 and 15. We may want to refactor
+# The rules are duplicated for Postgres v14 and 15. We may want to refactor
 # to avoid the duplication in the future, but it's tolerable for now.
 #
-$(POSTGRES_INSTALL_DIR)/build/%/config.status:
-	+@echo "Configuring Postgres $* build"
-	mkdir -p $(POSTGRES_INSTALL_DIR)/build/$*
-	(cd $(POSTGRES_INSTALL_DIR)/build/$* && \
-	env PATH="$(EXTRA_PATH_OVERRIDES):$$PATH" $(ROOT_PROJECT_DIR)/vendor/postgres-$*/configure \
-	CFLAGS='$(PG_CFLAGS)' \
+$(POSTGRES_INSTALL_DIR)/build/v14/config.status:
+	+@echo "Configuring Postgres v14 build"
+	mkdir -p $(POSTGRES_INSTALL_DIR)/build/v14
+	(cd $(POSTGRES_INSTALL_DIR)/build/v14 && \
+	$(ROOT_PROJECT_DIR)/vendor/postgres-v14/configure CFLAGS='$(PG_CFLAGS)' \
 	$(PG_CONFIGURE_OPTS) \
-	--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/$* > configure.log)
+	--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/v14 > configure.log)
 
+$(POSTGRES_INSTALL_DIR)/build/v15/config.status:
+	+@echo "Configuring Postgres v15 build"
+	mkdir -p $(POSTGRES_INSTALL_DIR)/build/v15
+	(cd $(POSTGRES_INSTALL_DIR)/build/v15 && \
+	$(ROOT_PROJECT_DIR)/vendor/postgres-v15/configure CFLAGS='$(PG_CFLAGS)' \
+	$(PG_CONFIGURE_OPTS) \
+	--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/v15 > configure.log)
+
 # nicer alias to run 'configure'
-# Note: I've been unable to use templates for this part of our configuration.
-# I'm not sure why it wouldn't work, but this is the only place (apart from
-# the "build-all-versions" entry points) where direct mention of PostgreSQL
-# versions is used.
-.PHONY: postgres-configure-v15
-postgres-configure-v15: $(POSTGRES_INSTALL_DIR)/build/v15/config.status
-.PHONY: postgres-configure-v14
-postgres-configure-v14: $(POSTGRES_INSTALL_DIR)/build/v14/config.status
+.PHONY: postgres-v14-configure
+postgres-v14-configure: $(POSTGRES_INSTALL_DIR)/build/v14/config.status
+
+.PHONY: postgres-v15-configure
+postgres-v15-configure: $(POSTGRES_INSTALL_DIR)/build/v15/config.status
 
 # Install the PostgreSQL header files into $(POSTGRES_INSTALL_DIR)/<version>/include
-.PHONY: postgres-headers-%
-postgres-headers-%: postgres-configure-%
-	+@echo "Installing PostgreSQL $* headers"
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/include MAKELEVEL=0 install
+.PHONY: postgres-v14-headers
+postgres-v14-headers: postgres-v14-configure
+	+@echo "Installing PostgreSQL v14 headers"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/src/include MAKELEVEL=0 install
 
+.PHONY: postgres-v15-headers
+postgres-v15-headers: postgres-v15-configure
+	+@echo "Installing PostgreSQL v15 headers"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/src/include MAKELEVEL=0 install
+
 # Compile and install PostgreSQL
-.PHONY: postgres-%
-postgres-%: postgres-configure-% \
-	postgres-headers-% # to prevent `make install` conflicts with neon's `postgres-headers`
-	+@echo "Compiling PostgreSQL $*"
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 install
-	+@echo "Compiling libpq $*"
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/interfaces/libpq install
-	+@echo "Compiling pg_prewarm $*"
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_prewarm install
-	+@echo "Compiling pg_buffercache $*"
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache install
-	+@echo "Compiling pageinspect $*"
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect install
+.PHONY: postgres-v14
+postgres-v14: postgres-v14-configure \
+	postgres-v14-headers # to prevent `make install` conflicts with neon's `postgres-headers`
+	+@echo "Compiling PostgreSQL v14"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14 MAKELEVEL=0 install
+	+@echo "Compiling libpq v14"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/src/interfaces/libpq install
+	+@echo "Compiling pg_buffercache v14"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pg_buffercache install
+	+@echo "Compiling pageinspect v14"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pageinspect install
 
-.PHONY: postgres-clean-%
-postgres-clean-%:
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 clean
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache clean
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect clean
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/interfaces/libpq clean
-.PHONY: neon-pg-ext-%
-neon-pg-ext-%: postgres-%
-	+@echo "Compiling neon $*"
-	mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-$*
-	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
-	-C $(POSTGRES_INSTALL_DIR)/build/neon-$* \
-	-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install
-	+@echo "Compiling neon_walredo $*"
-	mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$*
-	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
-	-C $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* \
-	-f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile install
-	+@echo "Compiling neon_test_utils $*"
-	mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$*
-	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
-	-C $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \
-	-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install
-
-.PHONY: neon-pg-ext-clean-%
-neon-pg-ext-clean-%:
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon_walredo-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile clean
-	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon_test_utils-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile clean
-
-.PHONY: neon-pg-ext
-neon-pg-ext: \
-	neon-pg-ext-v14 \
-	neon-pg-ext-v15
-
-.PHONY: neon-pg-ext-clean
-neon-pg-ext-clean: \
-	neon-pg-ext-clean-v14 \
-	neon-pg-ext-clean-v15
+.PHONY: postgres-v15
+postgres-v15: postgres-v15-configure \
+	postgres-v15-headers # to prevent `make install` conflicts with neon's `postgres-headers`
+	+@echo "Compiling PostgreSQL v15"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15 MAKELEVEL=0 install
+	+@echo "Compiling libpq v15"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/src/interfaces/libpq install
+	+@echo "Compiling pg_buffercache v15"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pg_buffercache install
+	+@echo "Compiling pageinspect v15"
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pageinspect install
 
 # shorthand to build all Postgres versions
-.PHONY: postgres
-postgres: \
-	postgres-v14 \
-	postgres-v15
+postgres: postgres-v14 postgres-v15
 
-.PHONY: postgres-headers
-postgres-headers: \
-	postgres-headers-v14 \
-	postgres-headers-v15
+.PHONY: postgres-v14-clean
+postgres-v14-clean:
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14 MAKELEVEL=0 clean
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pg_buffercache clean
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pageinspect clean
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/src/interfaces/libpq clean
 
-.PHONY: postgres-clean
-postgres-clean: \
-	postgres-clean-v14 \
-	postgres-clean-v15
+.PHONY: postgres-v15-clean
+postgres-v15-clean:
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15 MAKELEVEL=0 clean
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pg_buffercache clean
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pageinspect clean
+	$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/src/interfaces/libpq clean
+
+neon-pg-ext-v14: postgres-v14
+	+@echo "Compiling neon v14"
+	mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-v14
+	(cd $(POSTGRES_INSTALL_DIR)/build/neon-v14 && \
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v14/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
+	-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install)
+	+@echo "Compiling neon_test_utils" v14
+	mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v14
+	(cd $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v14 && \
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v14/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
+	-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install)
+
+neon-pg-ext-v15: postgres-v15
+	+@echo "Compiling neon v15"
+	mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-v15
+	(cd $(POSTGRES_INSTALL_DIR)/build/neon-v15 && \
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v15/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
+	-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install)
+	+@echo "Compiling neon_test_utils" v15
+	mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v15
+	(cd $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v15 && \
+	$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v15/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
+	-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install)
+
+.PHONY: neon-pg-ext-clean
+	$(MAKE) -C $(ROOT_PROJECT_DIR)/pgxn/neon clean
+	$(MAKE) -C $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils clean
+
+neon-pg-ext: neon-pg-ext-v14 neon-pg-ext-v15
+postgres-headers: postgres-v14-headers postgres-v15-headers
+postgres-clean: postgres-v14-clean postgres-v15-clean
 
 # This doesn't remove the effects of 'configure'.
 .PHONY: clean
-clean: postgres-clean neon-pg-ext-clean
+clean:
+	cd $(POSTGRES_INSTALL_DIR)/build/v14 && $(MAKE) clean
+	cd $(POSTGRES_INSTALL_DIR)/build/v15 && $(MAKE) clean
 	$(CARGO_CMD_PREFIX) cargo clean
+	cd pgxn/neon && $(MAKE) clean
+	cd pgxn/neon_test_utils && $(MAKE) clean
|
||||||
|
|
||||||
# This removes everything
|
# This removes everything
|
||||||
.PHONY: distclean
|
.PHONY: distclean
|
||||||
|
README.md
@@ -2,20 +2,29 @@
 Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes.
 
+The project used to be called "Zenith". Many of the commands and code comments
+still refer to "zenith", but we are in the process of renaming things.
+
 ## Quick start
-Try the [Neon Free Tier](https://neon.tech/docs/introduction/technical-preview-free-tier/) to create a serverless Postgres instance. Then connect to it with your preferred Postgres client (psql, dbeaver, etc) or use the online [SQL Editor](https://neon.tech/docs/get-started-with-neon/query-with-neon-sql-editor/). See [Connect from any application](https://neon.tech/docs/connect/connect-from-any-app/) for connection instructions.
+[Join the waitlist](https://neon.tech/) for our free tier to receive your serverless postgres instance. Then connect to it with your preferred postgres client (psql, dbeaver, etc) or use the online SQL editor.
 
 Alternatively, compile and run the project [locally](#running-local-installation).
 
 ## Architecture overview
 
-A Neon installation consists of compute nodes and the Neon storage engine. Compute nodes are stateless PostgreSQL nodes backed by the Neon storage engine.
+A Neon installation consists of compute nodes and a Neon storage engine.
+
+Compute nodes are stateless PostgreSQL nodes backed by the Neon storage engine.
 
 The Neon storage engine consists of two major components:
 - Pageserver. Scalable storage backend for the compute nodes.
-- Safekeepers. The safekeepers form a redundant WAL service that received WAL from the compute node, and stores it durably until it has been processed by the pageserver and uploaded to cloud storage.
+- WAL service. The service receives WAL from the compute node and ensures that it is stored durably.
 
-See developer documentation in [/docs/SUMMARY.md](/docs/SUMMARY.md) for more information.
+Pageserver consists of:
+- Repository - Neon storage implementation.
+- WAL receiver - service that receives WAL from WAL service and stores it in the repository.
+- Page service - service that communicates with compute nodes and responds with pages from the repository.
+- WAL redo - service that builds pages from base images and WAL records on Page service request
 
 ## Running local installation
 
@@ -26,13 +35,12 @@ See developer documentation in [/docs/SUMMARY.md](/docs/SUMMARY.md) for more inf
 * On Ubuntu or Debian, this set of packages should be sufficient to build the code:
 ```bash
 apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
-libssl-dev clang pkg-config libpq-dev cmake postgresql-client protobuf-compiler
+libssl-dev clang pkg-config libpq-dev etcd cmake postgresql-client
 ```
 * On Fedora, these packages are needed:
 ```bash
 dnf install flex bison readline-devel zlib-devel openssl-devel \
-libseccomp-devel perl clang cmake postgresql postgresql-contrib protobuf-compiler \
-protobuf-devel
+libseccomp-devel perl clang cmake etcd postgresql postgresql-contrib
 ```
 
 2. [Install Rust](https://www.rust-lang.org/tools/install)
@@ -45,7 +53,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
 1. Install XCode and dependencies
 ```
 xcode-select --install
-brew install protobuf openssl flex bison
+brew install protobuf etcd openssl
 ```
 
 2. [Install Rust](https://www.rust-lang.org/tools/install)
@@ -108,7 +116,7 @@ make -j`sysctl -n hw.logicalcpu`
 To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively.
 
 To run the integration tests or Python scripts (not required to use the code), install
-Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry>=1.3](https://python-poetry.org/)) in the project directory.
+Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry](https://python-poetry.org/)) in the project directory.
 
 
 #### Running neon database
@@ -117,26 +125,24 @@ Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (r
 # Create repository in .neon with proper paths to binaries and data
 # Later that would be responsibility of a package install script
 > ./target/debug/neon_local init
-Starting pageserver at '127.0.0.1:64000' in '.neon'.
+Starting pageserver at '127.0.0.1:64000' in '.neon'
+Pageserver started
+Successfully initialized timeline 7dd0907914ac399ff3be45fb252bfdb7
+Stopping pageserver gracefully...done!
 
-# start pageserver, safekeeper, and broker for their intercommunication
+# start pageserver and safekeeper
 > ./target/debug/neon_local start
-Starting neon broker at 127.0.0.1:50051
-storage_broker started, pid: 2918372
-Starting pageserver at '127.0.0.1:64000' in '.neon'.
-pageserver started, pid: 2918386
-Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'.
-safekeeper 1 started, pid: 2918437
-
-# create initial tenant and use it as a default for every future neon_local invocation
-> ./target/debug/neon_local tenant create --set-default
-tenant 9ef87a5bf0d92544f6fafeeb3239695c successfully created on the pageserver
-Created an initial timeline 'de200bd42b49cc1814412c7e592dd6e9' at Lsn 0/16B5A50 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c
-Setting tenant 9ef87a5bf0d92544f6fafeeb3239695c as a default one
+Starting etcd broker using /usr/bin/etcd
+Starting pageserver at '127.0.0.1:64000' in '.neon'
+Pageserver started
+Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'
+Safekeeper started
 
 # start postgres compute node
 > ./target/debug/neon_local pg start main
-Starting new postgres (v14) main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
+Starting new postgres main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
 Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/main port=55432
 Starting postgres node at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres'
 
@@ -217,27 +223,22 @@ Ensure your dependencies are installed as described [here](https://github.com/ne
 ```sh
 git clone --recursive https://github.com/neondatabase/neon.git
 
+# either:
 CARGO_BUILD_FLAGS="--features=testing" make
+# or:
+make debug
 
 ./scripts/pytest
 ```
 
 ## Documentation
 
-[/docs/](/docs/) Contains a top-level overview of all available markdown documentation.
+Now we use README files to cover design ideas and overall architecture for each module and `rustdoc` style documentation comments. See also [/docs/](/docs/) a top-level overview of all available markdown documentation.
 
 - [/docs/sourcetree.md](/docs/sourcetree.md) contains overview of source tree layout.
 
 To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open`
 
-See also README files in some source directories, and `rustdoc` style documentation comments.
-
-Other resources:
-
-- [SELECT 'Hello, World'](https://neon.tech/blog/hello-world/): Blog post by Nikita Shamgunov on the high level architecture
-- [Architecture decisions in Neon](https://neon.tech/blog/architecture-decisions-in-neon/): Blog post by Heikki Linnakangas
-- [Neon: Serverless PostgreSQL!](https://www.youtube.com/watch?v=rES0yzeERns): Presentation on storage system by Heikki Linnakangas in the CMU Database Group seminar series
-
 ### Postgres-specific terms
 
 Due to Neon's very close relation with PostgreSQL internals, numerous specific terms are used.
cli-v2-story.md (new file)
@@ -0,0 +1,188 @@
Create a new Zenith repository in the current directory:

~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli init
The files belonging to this database system will be owned by user "heikki".
This user must also own the server process.

The database cluster will be initialized with locale "en_GB.UTF-8".
The default database encoding has accordingly been set to "UTF8".
The default text search configuration will be set to "english".

Data page checksums are disabled.

creating directory tmp ... ok
creating subdirectories ... ok
selecting dynamic shared memory implementation ... posix
selecting default max_connections ... 100
selecting default shared_buffers ... 128MB
selecting default time zone ... Europe/Helsinki
creating configuration files ... ok
running bootstrap script ... ok
performing post-bootstrap initialization ... ok
syncing data to disk ... ok

initdb: warning: enabling "trust" authentication for local connections
You can change this by editing pg_hba.conf or using the option -A, or
--auth-local and --auth-host, the next time you run initdb.
new zenith repository was created in .zenith

Initially, there is only one branch:

~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch
main

Start a local Postgres instance on the branch:

~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start main
Creating data directory from snapshot at 0/15FFB08...
waiting for server to start....2021-04-13 09:27:43.919 EEST [984664] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv6 address "::1", port 5432
2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv4 address "127.0.0.1", port 5432
2021-04-13 09:27:43.927 EEST [984664] LOG: listening on Unix socket "/tmp/.s.PGSQL.5432"
2021-04-13 09:27:43.939 EEST [984665] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
2021-04-13 09:27:43.939 EEST [984665] LOG: creating missing WAL directory "pg_wal/archive_status"
2021-04-13 09:27:44.189 EEST [984665] LOG: database system was not properly shut down; automatic recovery in progress
2021-04-13 09:27:44.195 EEST [984665] LOG: invalid record length at 0/15FFB80: wanted 24, got 0
2021-04-13 09:27:44.195 EEST [984665] LOG: redo is not required
2021-04-13 09:27:44.225 EEST [984664] LOG: database system is ready to accept connections
done
server started

Run some commands against it:

~/git-sandbox/zenith (cli-v2)$ psql postgres -c "create table foo (t text);"
CREATE TABLE
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "insert into foo values ('inserted on the main branch');"
INSERT 0 1
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "select * from foo"
              t
-----------------------------
 inserted on the main branch
(1 row)

Create a new branch called 'experimental'. We create it from the
current end of the 'main' branch, but you could specify a different
LSN as the start point instead.

~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch experimental main
branching at end of WAL: 0/161F478

~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch
experimental
main

Start another Postgres instance off the 'experimental' branch:

~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433
Creating data directory from snapshot at 0/15FFB08...
waiting for server to start....2021-04-13 09:28:41.874 EEST [984766] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv6 address "::1", port 5433
2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv4 address "127.0.0.1", port 5433
2021-04-13 09:28:41.883 EEST [984766] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433"
2021-04-13 09:28:41.896 EEST [984767] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
2021-04-13 09:28:42.265 EEST [984767] LOG: database system was not properly shut down; automatic recovery in progress
2021-04-13 09:28:42.269 EEST [984767] LOG: redo starts at 0/15FFB80
2021-04-13 09:28:42.272 EEST [984767] LOG: invalid record length at 0/161F4B0: wanted 24, got 0
2021-04-13 09:28:42.272 EEST [984767] LOG: redo done at 0/161F478 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
2021-04-13 09:28:42.321 EEST [984766] LOG: database system is ready to accept connections
done
server started

Insert some a row on the 'experimental' branch:

~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
              t
-----------------------------
 inserted on the main branch
(1 row)

~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "insert into foo values ('inserted on experimental')"
INSERT 0 1
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
              t
-----------------------------
 inserted on the main branch
 inserted on experimental
(2 rows)

See that the other Postgres instance is still running on 'main' branch on port 5432:

~/git-sandbox/zenith (cli-v2)$ psql postgres -p5432 -c "select * from foo"
              t
-----------------------------
 inserted on the main branch
(1 row)

Everything is stored in the .zenith directory:

~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/
total 12
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 datadirs
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:27 refs
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 timelines

The 'datadirs' directory contains the datadirs of the running instances:

~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/
total 8
drwx------ 18 heikki heikki 4096 Apr 13 09:27 3c0c634c1674079b2c6d4edf7c91523e
drwx------ 18 heikki heikki 4096 Apr 13 09:28 697e3c103d4b1763cd6e82e4ff361d76
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/3c0c634c1674079b2c6d4edf7c91523e/
total 124
drwxr-xr-x 5 heikki heikki  4096 Apr 13 09:27 base
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 global
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_commit_ts
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_dynshmem
-rw------- 1 heikki heikki  4760 Apr 13 09:27 pg_hba.conf
-rw------- 1 heikki heikki  1636 Apr 13 09:27 pg_ident.conf
drwxr-xr-x 4 heikki heikki  4096 Apr 13 09:32 pg_logical
drwxr-xr-x 4 heikki heikki  4096 Apr 13 09:27 pg_multixact
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_notify
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_replslot
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_serial
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_snapshots
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_stat
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:34 pg_stat_tmp
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_subtrans
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_tblspc
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_twophase
-rw------- 1 heikki heikki     3 Apr 13 09:27 PG_VERSION
lrwxrwxrwx 1 heikki heikki    52 Apr 13 09:27 pg_wal -> ../../timelines/3c0c634c1674079b2c6d4edf7c91523e/wal
drwxr-xr-x 2 heikki heikki  4096 Apr 13 09:27 pg_xact
-rw------- 1 heikki heikki    88 Apr 13 09:27 postgresql.auto.conf
-rw------- 1 heikki heikki 28688 Apr 13 09:27 postgresql.conf
-rw------- 1 heikki heikki    96 Apr 13 09:27 postmaster.opts
-rw------- 1 heikki heikki   149 Apr 13 09:27 postmaster.pid

Note how 'pg_wal' is just a symlink to the 'timelines' directory. The
datadir is ephemeral, you can delete it at any time, and it can be reconstructed
from the snapshots and WAL stored in the 'timelines' directory. So if you push/pull
the repository, the 'datadirs' are not included. (They are like git working trees)

~/git-sandbox/zenith (cli-v2)$ killall -9 postgres
~/git-sandbox/zenith (cli-v2)$ rm -rf .zenith/datadirs/*
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433
Creating data directory from snapshot at 0/15FFB08...
waiting for server to start....2021-04-13 09:37:05.476 EEST [985340] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv6 address "::1", port 5433
2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv4 address "127.0.0.1", port 5433
2021-04-13 09:37:05.487 EEST [985340] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433"
2021-04-13 09:37:05.498 EEST [985341] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
2021-04-13 09:37:05.808 EEST [985341] LOG: database system was not properly shut down; automatic recovery in progress
2021-04-13 09:37:05.813 EEST [985341] LOG: redo starts at 0/15FFB80
2021-04-13 09:37:05.815 EEST [985341] LOG: invalid record length at 0/161F770: wanted 24, got 0
2021-04-13 09:37:05.815 EEST [985341] LOG: redo done at 0/161F738 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
2021-04-13 09:37:05.866 EEST [985340] LOG: database system is ready to accept connections
done
server started
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
              t
-----------------------------
 inserted on the main branch
 inserted on experimental
(2 rows)
@@ -1,28 +1,21 @@
 [package]
 name = "compute_tools"
 version = "0.1.0"
-edition.workspace = true
-license.workspace = true
+edition = "2021"
 
 [dependencies]
-anyhow.workspace = true
-chrono.workspace = true
-clap.workspace = true
-futures.workspace = true
-hyper = { workspace = true, features = ["full"] }
-notify.workspace = true
-opentelemetry.workspace = true
-postgres.workspace = true
-regex.workspace = true
-serde.workspace = true
-serde_json.workspace = true
-tar.workspace = true
-tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
-tokio-postgres.workspace = true
-tracing.workspace = true
-tracing-opentelemetry.workspace = true
-tracing-subscriber.workspace = true
-tracing-utils.workspace = true
-url.workspace = true
-
-workspace_hack.workspace = true
+anyhow = "1.0"
+chrono = "0.4"
+clap = "3.0"
+env_logger = "0.9"
+hyper = { version = "0.14", features = ["full"] }
+log = { version = "0.4", features = ["std", "serde"] }
+postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
+regex = "1"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1"
+tar = "0.4"
+tokio = { version = "1.17", features = ["macros", "rt", "rt-multi-thread"] }
+tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
+url = "2.2.2"
+workspace_hack = { version = "0.1", path = "../workspace_hack" }
@@ -19,10 +19,6 @@ Also `compute_ctl` spawns two separate service threads:
 - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and the
   last activity requests.
 
-If the `vm-informant` binary is present at `/bin/vm-informant`, it will also be started. For VM
-compute nodes, `vm-informant` communicates with the VM autoscaling system. It coordinates
-downscaling and (eventually) will request immediate upscaling under resource pressure.
-
 Usage example:
 ```sh
 compute_ctl -D /var/db/postgres/compute \
@@ -18,10 +18,6 @@
 //! - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and the
 //!   last activity requests.
 //!
-//! If the `vm-informant` binary is present at `/bin/vm-informant`, it will also be started. For VM
-//! compute nodes, `vm-informant` communicates with the VM autoscaling system. It coordinates
-//! downscaling and (eventually) will request immediate upscaling under resource pressure.
-//!
 //! Usage example:
 //! ```sh
 //! compute_ctl -D /var/db/postgres/compute \
@@ -40,11 +36,10 @@ use std::{thread, time::Duration};
 use anyhow::{Context, Result};
 use chrono::Utc;
 use clap::Arg;
-use tracing::{error, info};
+use log::{error, info};
 
 use compute_tools::compute::{ComputeMetrics, ComputeNode, ComputeState, ComputeStatus};
 use compute_tools::http::api::launch_http_server;
-use compute_tools::informant::spawn_vm_informant_if_present;
 use compute_tools::logger::*;
 use compute_tools::monitor::launch_monitor;
 use compute_tools::params::*;
@@ -53,21 +48,56 @@ use compute_tools::spec::*;
 use url::Url;
 
 fn main() -> Result<()> {
-    init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
+    // TODO: re-use `utils::logging` later
+    init_logger(DEFAULT_LOG_LEVEL)?;
 
-    let matches = cli().get_matches();
+    // Env variable is set by `cargo`
+    let version: Option<&str> = option_env!("CARGO_PKG_VERSION");
+    let matches = clap::App::new("compute_ctl")
+        .version(version.unwrap_or("unknown"))
+        .arg(
+            Arg::new("connstr")
+                .short('C')
+                .long("connstr")
+                .value_name("DATABASE_URL")
+                .required(true),
+        )
+        .arg(
+            Arg::new("pgdata")
+                .short('D')
+                .long("pgdata")
+                .value_name("DATADIR")
+                .required(true),
+        )
+        .arg(
+            Arg::new("pgbin")
+                .short('b')
+                .long("pgbin")
+                .value_name("POSTGRES_PATH"),
+        )
+        .arg(
+            Arg::new("spec")
+                .short('s')
+                .long("spec")
+                .value_name("SPEC_JSON"),
+        )
+        .arg(
+            Arg::new("spec-path")
+                .short('S')
+                .long("spec-path")
+                .value_name("SPEC_PATH"),
+        )
+        .get_matches();
 
-    let pgdata = matches
-        .get_one::<String>("pgdata")
-        .expect("PGDATA path is required");
+    let pgdata = matches.value_of("pgdata").expect("PGDATA path is required");
     let connstr = matches
-        .get_one::<String>("connstr")
+        .value_of("connstr")
         .expect("Postgres connection string is required");
-    let spec = matches.get_one::<String>("spec");
-    let spec_path = matches.get_one::<String>("spec-path");
+    let spec = matches.value_of("spec");
+    let spec_path = matches.value_of("spec-path");
 
     // Try to use just 'postgres' if no path is provided
-    let pgbin = matches.get_one::<String>("pgbin").unwrap();
+    let pgbin = matches.value_of("pgbin").unwrap_or("postgres");
 
     let spec: ComputeSpec = match spec {
         // First, try to get cluster spec from the cli argument
@@ -84,29 +114,6 @@ fn main() -> Result<()> {
         }
     };
 
-    // Extract OpenTelemetry context for the startup actions from the spec, and
-    // attach it to the current tracing context.
-    //
-    // This is used to propagate the context for the 'start_compute' operation
-    // from the neon control plane. This allows linking together the wider
-    // 'start_compute' operation that creates the compute container, with the
-    // startup actions here within the container.
-    //
-    // Switch to the startup context here, and exit it once the startup has
-    // completed and Postgres is up and running.
-    //
-    // NOTE: This is supposed to only cover the *startup* actions. Once
-    // postgres is configured and up-and-running, we exit this span. Any other
-    // actions that are performed on incoming HTTP requests, for example, are
-    // performed in separate spans.
-    let startup_context_guard = if let Some(ref carrier) = spec.startup_tracing_context {
-        use opentelemetry::propagation::TextMapPropagator;
-        use opentelemetry::sdk::propagation::TraceContextPropagator;
-        Some(TraceContextPropagator::new().extract(carrier).attach())
-    } else {
-        None
-    };
-
     let pageserver_connstr = spec
         .cluster
         .settings
@@ -132,7 +139,7 @@ fn main() -> Result<()> {
         tenant,
         timeline,
         pageserver_connstr,
-        metrics: ComputeMetrics::default(),
+        metrics: ComputeMetrics::new(),
         state: RwLock::new(ComputeState::new()),
     };
     let compute = Arc::new(compute_state);
@@ -141,98 +148,28 @@ fn main() -> Result<()> {
     // requests, while configuration is still in progress.
     let _http_handle = launch_http_server(&compute).expect("cannot launch http endpoint thread");
     let _monitor_handle = launch_monitor(&compute).expect("cannot launch compute monitor thread");
-    // Also spawn the thread responsible for handling the VM informant -- if it's present
-    let _vm_informant_handle = spawn_vm_informant_if_present().expect("cannot launch VM informant");
 
-    // Start Postgres
-    let mut delay_exit = false;
-    let mut exit_code = None;
-    let pg = match compute.start_compute() {
-        Ok(pg) => Some(pg),
-        Err(err) => {
-            error!("could not start the compute node: {:?}", err);
-            let mut state = compute.state.write().unwrap();
-            state.error = Some(format!("{:?}", err));
-            state.status = ComputeStatus::Failed;
-            drop(state);
-            delay_exit = true;
-            None
-        }
-    };
-
-    // Wait for the child Postgres process forever. In this state Ctrl+C will
-    // propagate to Postgres and it will be shut down as well.
-    if let Some(mut pg) = pg {
-        // Startup is finished, exit the startup tracing span
-        drop(startup_context_guard);
-
-        let ecode = pg
-            .wait()
-            .expect("failed to start waiting on Postgres process");
-        info!("Postgres exited with code {}, shutting down", ecode);
-        exit_code = ecode.code()
-    }
-
-    if let Err(err) = compute.check_for_core_dumps() {
-        error!("error while checking for core dumps: {err:?}");
-    }
-
-    // If launch failed, keep serving HTTP requests for a while, so the cloud
-    // control plane can get the actual error.
-    if delay_exit {
-        info!("giving control plane 30s to collect the error before shutdown");
-        thread::sleep(Duration::from_secs(30));
-        info!("shutting down");
-    }
-
-    // Shutdown trace pipeline gracefully, so that it has a chance to send any
-    // pending traces before we exit.
-    tracing_utils::shutdown_tracing();
-
-    exit(exit_code.unwrap_or(1))
-}
-
-fn cli() -> clap::Command {
-    // Env variable is set by `cargo`
-    let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
-    clap::Command::new("compute_ctl")
-        .version(version)
-        .arg(
-            Arg::new("connstr")
-                .short('C')
-                .long("connstr")
-                .value_name("DATABASE_URL")
-                .required(true),
-        )
-        .arg(
-            Arg::new("pgdata")
-                .short('D')
-                .long("pgdata")
-                .value_name("DATADIR")
-                .required(true),
-        )
-        .arg(
-            Arg::new("pgbin")
-                .short('b')
-                .long("pgbin")
-                .default_value("postgres")
-                .value_name("POSTGRES_PATH"),
-        )
-        .arg(
-            Arg::new("spec")
-                .short('s')
-                .long("spec")
-                .value_name("SPEC_JSON"),
-        )
-        .arg(
-            Arg::new("spec-path")
-                .short('S')
-                .long("spec-path")
-                .value_name("SPEC_PATH"),
-        )
-}
-
-#[test]
-fn verify_cli() {
-    cli().debug_assert()
-}
+    // Run compute (Postgres) and hang waiting on it.
+    match compute.prepare_and_run() {
+        Ok(ec) => {
+            let code = ec.code().unwrap_or(1);
+            info!("Postgres exited with code {}, shutting down", code);
+            exit(code)
+        }
+        Err(error) => {
+            error!("could not start the compute node: {:?}", error);
+            let mut state = compute.state.write().unwrap();
+            state.error = Some(format!("{:?}", error));
+            state.status = ComputeStatus::Failed;
+            drop(state);
+
+            // Keep serving HTTP requests, so the cloud control plane was able to
+            // get the actual error.
+            info!("giving control plane 30s to collect the error before shutdown");
+            thread::sleep(Duration::from_secs(30));
+            info!("shutting down");
+            Err(error)
+        }
+    }
 }
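The `-` side above pulls the argument definitions out of `main` into a separate `cli()` function and adds a `debug_assert` test. A minimal sketch of that pattern, assuming a clap 3.2+/4 dependency; the binary name and the argument set here are trimmed down for illustration and are not the real CLI:

```rust
use clap::{Arg, Command};

// Build the argument parser in one place so a test can validate it.
fn cli() -> Command {
    Command::new("compute_ctl_example")
        .arg(
            Arg::new("pgdata")
                .short('D')
                .long("pgdata")
                .value_name("DATADIR")
                .required(true),
        )
        .arg(
            Arg::new("pgbin")
                .short('b')
                .long("pgbin")
                .default_value("postgres")
                .value_name("POSTGRES_PATH"),
        )
}

fn main() {
    let matches = cli().get_matches();
    // `get_one::<String>` is the typed replacement for the older `value_of`.
    let pgdata = matches.get_one::<String>("pgdata").expect("required");
    let pgbin = matches.get_one::<String>("pgbin").unwrap();
    println!("pgdata={pgdata} pgbin={pgbin}");
}

#[test]
fn verify_cli() {
    // Catches conflicting or malformed argument definitions at test time.
    cli().debug_assert();
}
```

Moving the fallback for `--pgbin` into `.default_value("postgres")` is what lets the call site drop the `unwrap_or("postgres")` seen on the `+` side.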
@@ -1,12 +1,11 @@
 use anyhow::{anyhow, Result};
+use log::error;
 use postgres::Client;
 use tokio_postgres::NoTls;
-use tracing::{error, instrument};
 
 use crate::compute::ComputeNode;
 
-#[instrument(skip_all)]
-pub fn create_writability_check_data(client: &mut Client) -> Result<()> {
+pub fn create_writablity_check_data(client: &mut Client) -> Result<()> {
     let query = "
         CREATE TABLE IF NOT EXISTS health_check (
             id serial primary key,
@@ -22,7 +21,6 @@ pub fn create_writability_check_data(client: &mut Client) -> Result<()> {
     Ok(())
 }
 
-#[instrument(skip_all)]
 pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
     let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
     if client.is_closed() {
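One side of this hunk replaces `log` with `tracing` and annotates the checker functions with `#[instrument(skip_all)]`. A small self-contained sketch of what that attribute does, assuming `tracing` and `tracing-subscriber` are available; the function and message are made up for illustration:

```rust
use tracing::{info, instrument};

// `skip_all` keeps the (possibly large or non-Debug) arguments out of the span,
// while still wrapping every event emitted inside in a span named after the function.
#[instrument(skip_all)]
fn create_check_table(sql: &str) {
    info!(bytes = sql.len(), "running bootstrap query");
}

fn main() {
    // A minimal subscriber so the span and event are actually printed.
    tracing_subscriber::fmt::init();
    create_check_table("CREATE TABLE IF NOT EXISTS health_check (id serial primary key)");
}
```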
@@ -17,17 +17,17 @@
 use std::fs;
 use std::os::unix::fs::PermissionsExt;
 use std::path::Path;
-use std::process::{Command, Stdio};
+use std::process::{Command, ExitStatus, Stdio};
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::RwLock;
 
 use anyhow::{Context, Result};
 use chrono::{DateTime, Utc};
+use log::info;
 use postgres::{Client, NoTls};
 use serde::{Serialize, Serializer};
-use tracing::{info, instrument, warn};
 
-use crate::checker::create_writability_check_data;
+use crate::checker::create_writablity_check_data;
 use crate::config;
 use crate::pg_helpers::*;
 use crate::spec::*;
@@ -91,7 +91,7 @@ pub enum ComputeStatus {
     Failed,
 }
 
-#[derive(Default, Serialize)]
+#[derive(Serialize)]
 pub struct ComputeMetrics {
     pub sync_safekeepers_ms: AtomicU64,
     pub basebackup_ms: AtomicU64,
@@ -99,6 +99,23 @@ pub struct ComputeMetrics {
     pub total_startup_ms: AtomicU64,
 }
 
+impl ComputeMetrics {
+    pub fn new() -> Self {
+        Self {
+            sync_safekeepers_ms: AtomicU64::new(0),
+            basebackup_ms: AtomicU64::new(0),
+            config_ms: AtomicU64::new(0),
+            total_startup_ms: AtomicU64::new(0),
+        }
+    }
+}
+
+impl Default for ComputeMetrics {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl ComputeNode {
     pub fn set_status(&self, status: ComputeStatus) {
         self.state.write().unwrap().status = status;
@@ -121,7 +138,6 @@ impl ComputeNode {
 
     // Get basebackup from the libpq connection to pageserver using `connstr` and
     // unarchive it to `pgdata` directory overriding all its previous content.
-    #[instrument(skip(self))]
     fn get_basebackup(&self, lsn: &str) -> Result<()> {
         let start_time = Utc::now();
 
@@ -155,14 +171,14 @@ impl ComputeNode {
 
     // Run `postgres` in a special mode with `--sync-safekeepers` argument
     // and return the reported LSN back to the caller.
-    #[instrument(skip(self))]
     fn sync_safekeepers(&self) -> Result<String> {
         let start_time = Utc::now();
 
         let sync_handle = Command::new(&self.pgbin)
-            .args(["--sync-safekeepers"])
+            .args(&["--sync-safekeepers"])
             .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
             .stdout(Stdio::piped())
+            .stderr(Stdio::piped())
             .spawn()
             .expect("postgres --sync-safekeepers failed to start");
 
@@ -175,10 +191,10 @@ impl ComputeNode {
 
         if !sync_output.status.success() {
             anyhow::bail!(
-                "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
+                "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}, stderr: {}",
                 sync_output.status,
-                String::from_utf8(sync_output.stdout)
-                    .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
+                String::from_utf8(sync_output.stdout).expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
+                String::from_utf8(sync_output.stderr).expect("postgres --sync-safekeepers exited, and stderr is not utf-8"),
             );
         }
 
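The metrics hunk above is the same struct expressed two ways: a hand-written `ComputeMetrics::new()` plus a manual `Default` impl on one side, and a plain `#[derive(Default, Serialize)]` on the other. The derive works because `AtomicU64::default()` is zero, so both forms produce an all-zero metrics block. A small sketch (struct name hypothetical, fields mirroring the diff):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Default)]
struct StartupMetrics {
    sync_safekeepers_ms: AtomicU64,
    basebackup_ms: AtomicU64,
    config_ms: AtomicU64,
    total_startup_ms: AtomicU64,
}

fn main() {
    // Equivalent to a manual constructor that fills every field with AtomicU64::new(0).
    let m = StartupMetrics::default();
    m.basebackup_ms.store(42, Ordering::Relaxed);
    assert_eq!(m.sync_safekeepers_ms.load(Ordering::Relaxed), 0);
    assert_eq!(m.basebackup_ms.load(Ordering::Relaxed), 42);
}
```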
@@ -198,7 +214,6 @@ impl ComputeNode {
 
     /// Do all the preparations like PGDATA directory creation, configuration,
     /// safekeepers sync, basebackup, etc.
-    #[instrument(skip(self))]
     pub fn prepare_pgdata(&self) -> Result<()> {
         let spec = &self.spec;
         let pgdata_path = Path::new(&self.pgdata);
@@ -232,27 +247,30 @@ impl ComputeNode {
 
     /// Start Postgres as a child process and manage DBs/roles.
     /// After that this will hang waiting on the postmaster process to exit.
-    #[instrument(skip(self))]
-    pub fn start_postgres(&self) -> Result<std::process::Child> {
+    pub fn run(&self) -> Result<ExitStatus> {
+        let start_time = Utc::now();
+
         let pgdata_path = Path::new(&self.pgdata);
 
         // Run postgres as a child process.
         let mut pg = Command::new(&self.pgbin)
-            .args(["-D", &self.pgdata])
+            .args(&["-D", &self.pgdata])
             .spawn()
             .expect("cannot start postgres process");
 
-        wait_for_postgres(&mut pg, pgdata_path)?;
+        // Try default Postgres port if it is not provided
+        let port = self
+            .spec
+            .cluster
+            .settings
+            .find("port")
+            .unwrap_or_else(|| "5432".to_string());
+        wait_for_postgres(&mut pg, &port, pgdata_path)?;
 
-        Ok(pg)
-    }
-
-    #[instrument(skip(self))]
-    pub fn apply_config(&self) -> Result<()> {
         // If connection fails,
         // it may be the old node with `zenith_admin` superuser.
         //
-        // In this case we need to connect with old `zenith_admin` name
+        // In this case we need to connect with old `zenith_admin`name
         // and create new user. We cannot simply rename connected user,
         // but we can create a new one and grant it all privileges.
         let mut client = match Client::connect(self.connstr.as_str(), NoTls) {
@@ -278,43 +296,16 @@ impl ComputeNode {
             Ok(client) => client,
         };
 
-        // Proceed with post-startup configuration. Note, that order of operations is important.
         handle_roles(&self.spec, &mut client)?;
         handle_databases(&self.spec, &mut client)?;
         handle_role_deletions(self, &mut client)?;
         handle_grants(self, &mut client)?;
-        create_writability_check_data(&mut client)?;
+        create_writablity_check_data(&mut client)?;
 
         // 'Close' connection
         drop(client);
 
-        info!(
-            "finished configuration of compute for project {}",
-            self.spec.cluster.cluster_id
-        );
-
-        Ok(())
-    }
-
-    #[instrument(skip(self))]
-    pub fn start_compute(&self) -> Result<std::process::Child> {
-        info!(
-            "starting compute for project {}, operation {}, tenant {}, timeline {}",
-            self.spec.cluster.cluster_id,
-            self.spec.operation_uuid.as_ref().unwrap(),
-            self.tenant,
-            self.timeline,
-        );
-
-        self.prepare_pgdata()?;
-
-        let start_time = Utc::now();
-
-        let pg = self.start_postgres()?;
-
-        self.apply_config()?;
-
         let startup_end_time = Utc::now();
 
         self.metrics.config_ms.store(
             startup_end_time
                 .signed_duration_since(start_time)
@@ -334,70 +325,30 @@ impl ComputeNode {
 
         self.set_status(ComputeStatus::Running);
 
-        Ok(pg)
-    }
-
-    // Look for core dumps and collect backtraces.
-    //
-    // EKS worker nodes have following core dump settings:
-    //   /proc/sys/kernel/core_pattern -> core
-    //   /proc/sys/kernel/core_uses_pid -> 1
-    //   ulimint -c -> unlimited
-    // which results in core dumps being written to postgres data directory as core.<pid>.
-    //
-    // Use that as a default location and pattern, except macos where core dumps are written
-    // to /cores/ directory by default.
-    pub fn check_for_core_dumps(&self) -> Result<()> {
-        let core_dump_dir = match std::env::consts::OS {
-            "macos" => Path::new("/cores/"),
-            _ => Path::new(&self.pgdata),
-        };
-
-        // Collect core dump paths if any
-        info!("checking for core dumps in {}", core_dump_dir.display());
-        let files = fs::read_dir(core_dump_dir)?;
-        let cores = files.filter_map(|entry| {
-            let entry = entry.ok()?;
-            let _ = entry.file_name().to_str()?.strip_prefix("core.")?;
-            Some(entry.path())
-        });
-
-        // Print backtrace for each core dump
-        for core_path in cores {
-            warn!(
-                "core dump found: {}, collecting backtrace",
-                core_path.display()
-            );
-
-            // Try first with gdb
-            let backtrace = Command::new("gdb")
-                .args(["--batch", "-q", "-ex", "bt", &self.pgbin])
-                .arg(&core_path)
-                .output();
-
-            // Try lldb if no gdb is found -- that is handy for local testing on macOS
-            let backtrace = match backtrace {
-                Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
-                    warn!("cannot find gdb, trying lldb");
-                    Command::new("lldb")
-                        .arg("-c")
-                        .arg(&core_path)
-                        .args(["--batch", "-o", "bt all", "-o", "quit"])
-                        .output()
-                }
-                _ => backtrace,
-            }?;
-
-            warn!(
-                "core dump backtrace: {}",
-                String::from_utf8_lossy(&backtrace.stdout)
-            );
-            warn!(
-                "debugger stderr: {}",
-                String::from_utf8_lossy(&backtrace.stderr)
-            );
-        }
-
-        Ok(())
+        info!(
+            "finished configuration of compute for project {}",
+            self.spec.cluster.cluster_id
+        );
+
+        // Wait for child Postgres process basically forever. In this state Ctrl+C
+        // will propagate to Postgres and it will be shut down as well.
+        let ecode = pg
+            .wait()
+            .expect("failed to start waiting on Postgres process");
+
+        Ok(ecode)
+    }
+
+    pub fn prepare_and_run(&self) -> Result<ExitStatus> {
+        info!(
+            "starting compute for project {}, operation {}, tenant {}, timeline {}",
+            self.spec.cluster.cluster_id,
+            self.spec.operation_uuid.as_ref().unwrap(),
+            self.tenant,
+            self.timeline,
+        );
+
+        self.prepare_pgdata()?;
+        self.run()
     }
 }
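Both sides of the hunk above end a compute's life the same way: wait on the postmaster child and turn its `ExitStatus` into this process's own exit code, falling back to 1 when the child was killed by a signal and has no code. A stripped-down sketch of that contract, using a stand-in command instead of `postgres -D <pgdata>`:

```rust
use std::process::{exit, Command, ExitStatus};

// Spawn a child and block until it finishes, returning its raw status.
fn run_child() -> std::io::Result<ExitStatus> {
    let mut child = Command::new("true").spawn()?; // stand-in for the postmaster
    child.wait()
}

fn main() {
    match run_child() {
        Ok(status) => {
            // `code()` is None if the child was terminated by a signal.
            let code = status.code().unwrap_or(1);
            eprintln!("child exited with code {code}, shutting down");
            exit(code);
        }
        Err(err) => {
            eprintln!("could not start child: {err}");
            exit(1);
        }
    }
}
```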
@@ -6,20 +6,32 @@ use std::thread;
 use anyhow::Result;
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Method, Request, Response, Server, StatusCode};
+use log::{error, info};
 use serde_json;
-use tracing::{error, info};
-use tracing_utils::http::OtelName;
 
-use crate::compute::ComputeNode;
+use crate::compute::{ComputeNode, ComputeStatus};
 
 // Service function to handle all available routes.
-async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
-    //
-    // NOTE: The URI path is currently included in traces. That's OK because
-    // it doesn't contain any variable parts or sensitive information. But
-    // please keep that in mind if you change the routing here.
-    //
+async fn routes(req: Request<Body>, compute: Arc<ComputeNode>) -> Response<Body> {
     match (req.method(), req.uri().path()) {
+        // Timestamp of the last Postgres activity in the plain text.
+        // DEPRECATED in favour of /status
+        (&Method::GET, "/last_activity") => {
+            info!("serving /last_active GET request");
+            let state = compute.state.read().unwrap();
+
+            // Use RFC3339 format for consistency.
+            Response::new(Body::from(state.last_active.to_rfc3339()))
+        }
+
+        // Has compute setup process finished? -> true/false.
+        // DEPRECATED in favour of /status
+        (&Method::GET, "/ready") => {
+            info!("serving /ready GET request");
+            let status = compute.get_status();
+            Response::new(Body::from(format!("{}", status == ComputeStatus::Running)))
+        }
+
         // Serialized compute state.
         (&Method::GET, "/status") => {
             info!("serving /status GET request");
@@ -34,9 +46,19 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
             Response::new(Body::from(serde_json::to_string(&compute.metrics).unwrap()))
         }
 
+        // DEPRECATED, use POST instead
+        (&Method::GET, "/check_writability") => {
+            info!("serving /check_writability GET request");
+            let res = crate::checker::check_writability(&compute).await;
+            match res {
+                Ok(_) => Response::new(Body::from("true")),
+                Err(e) => Response::new(Body::from(e.to_string())),
+            }
+        }
+
         (&Method::POST, "/check_writability") => {
             info!("serving /check_writability POST request");
-            let res = crate::checker::check_writability(compute).await;
+            let res = crate::checker::check_writability(&compute).await;
             match res {
                 Ok(_) => Response::new(Body::from("true")),
                 Err(e) => Response::new(Body::from(e.to_string())),
@@ -62,19 +84,7 @@ async fn serve(state: Arc<ComputeNode>) {
         async move {
             Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
                 let state = state.clone();
-                async move {
-                    Ok::<_, Infallible>(
-                        // NOTE: We include the URI path in the string. It
-                        // doesn't contain any variable parts or sensitive
-                        // information in this API.
-                        tracing_utils::http::tracing_handler(
-                            req,
-                            |req| routes(req, &state),
-                            OtelName::UriPath,
-                        )
-                        .await,
-                    )
-                }
+                async move { Ok::<_, Infallible>(routes(req, state).await) }
             }))
         }
     });
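Note (sketch under assumptions, not taken from this patch): both sides of the `serve` hunk rely on the same hyper 0.14 pattern, where the shared `Arc` state is cloned once per connection in `make_service_fn` and once per request in `service_fn` before being moved into the async block that calls the router. A minimal standalone version of that wiring, with a placeholder state type:

    use std::convert::Infallible;
    use std::net::SocketAddr;
    use std::sync::Arc;

    use hyper::service::{make_service_fn, service_fn};
    use hyper::{Body, Request, Response, Server};

    // Stand-in for the real router.
    async fn routes(_req: Request<Body>, state: Arc<String>) -> Response<Body> {
        Response::new(Body::from(format!("state: {state}")))
    }

    #[tokio::main]
    async fn main() -> Result<(), hyper::Error> {
        let state = Arc::new("example".to_string());
        let addr = SocketAddr::from(([127, 0, 0, 1], 3080));

        let make_svc = make_service_fn(move |_conn| {
            let state = state.clone(); // clone per connection
            async move {
                Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
                    let state = state.clone(); // clone per request
                    async move { Ok::<_, Infallible>(routes(req, state).await) }
                }))
            }
        });

        Server::bind(&addr).serve(make_svc).await
    }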
@@ -37,7 +37,58 @@ paths:
             schema:
               $ref: "#/components/schemas/ComputeMetrics"
 
+  /ready:
+    get:
+      deprecated: true
+      tags:
+      - "info"
+      summary: Check whether compute startup process finished successfully
+      description: ""
+      operationId: computeIsReady
+      responses:
+        "200":
+          description: Compute is ready ('true') or not ('false')
+          content:
+            text/plain:
+              schema:
+                type: string
+                example: "true"
+
+  /last_activity:
+    get:
+      deprecated: true
+      tags:
+      - "info"
+      summary: Get timestamp of the last compute activity
+      description: ""
+      operationId: getLastComputeActivityTS
+      responses:
+        "200":
+          description: Timestamp of the last compute activity
+          content:
+            text/plain:
+              schema:
+                type: string
+                example: "2022-10-12T07:20:50.52Z"
+
   /check_writability:
+    get:
+      deprecated: true
+      tags:
+      - "check"
+      summary: Check that we can write new data on this compute
+      description: ""
+      operationId: checkComputeWritabilityDeprecated
+      responses:
+        "200":
+          description: Check result
+          content:
+            text/plain:
+              schema:
+                type: string
+                description: Error text or 'true' if check passed
+                example: "true"
+
     post:
       tags:
       - "check"
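Note (illustrative only): the `/ready` and `/last_activity` entries above are plain-text GET endpoints. A client-side sketch, assuming the reqwest crate with the `blocking` feature and an assumed listen address of 127.0.0.1:3080 (the port is a placeholder, not taken from this spec):

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let ready = reqwest::blocking::get("http://127.0.0.1:3080/ready")?.text()?;
        let last_activity = reqwest::blocking::get("http://127.0.0.1:3080/last_activity")?.text()?;
        println!("ready = {ready}, last activity = {last_activity}");
        Ok(())
    }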
@@ -1,50 +0,0 @@
-use std::path::Path;
-use std::process;
-use std::thread;
-use std::time::Duration;
-use tracing::{info, warn};
-
-use anyhow::{Context, Result};
-
-const VM_INFORMANT_PATH: &str = "/bin/vm-informant";
-const RESTART_INFORMANT_AFTER_MILLIS: u64 = 5000;
-
-/// Launch a thread to start the VM informant if it's present (and restart, on failure)
-pub fn spawn_vm_informant_if_present() -> Result<Option<thread::JoinHandle<()>>> {
-    let exists = Path::new(VM_INFORMANT_PATH)
-        .try_exists()
-        .context("could not check if path exists")?;
-
-    if !exists {
-        return Ok(None);
-    }
-
-    Ok(Some(
-        thread::Builder::new()
-            .name("run-vm-informant".into())
-            .spawn(move || run_informant())?,
-    ))
-}
-
-fn run_informant() -> ! {
-    let restart_wait = Duration::from_millis(RESTART_INFORMANT_AFTER_MILLIS);
-
-    info!("starting VM informant");
-
-    loop {
-        let mut cmd = process::Command::new(VM_INFORMANT_PATH);
-        // Block on subprocess:
-        let result = cmd.status();
-
-        match result {
-            Err(e) => warn!("failed to run VM informant at {VM_INFORMANT_PATH:?}: {e}"),
-            Ok(status) if !status.success() => {
-                warn!("{VM_INFORMANT_PATH} exited with code {status:?}, retrying")
-            }
-            Ok(_) => info!("{VM_INFORMANT_PATH} ended gracefully (unexpectedly). Retrying"),
-        }
-
-        // Wait before retrying
-        thread::sleep(restart_wait);
-    }
-}
@@ -8,7 +8,6 @@ pub mod http;
 #[macro_use]
 pub mod logger;
 pub mod compute;
-pub mod informant;
 pub mod monitor;
 pub mod params;
 pub mod pg_helpers;
@@ -1,37 +1,43 @@
-use tracing_opentelemetry::OpenTelemetryLayer;
-use tracing_subscriber::layer::SubscriberExt;
-use tracing_subscriber::prelude::*;
+use std::io::Write;
 
-/// Initialize logging to stderr, and OpenTelemetry tracing and exporter.
-///
-/// Logging is configured using either `default_log_level` or
+use anyhow::Result;
+use chrono::Utc;
+use env_logger::{Builder, Env};
+
+macro_rules! info_println {
+    ($($tts:tt)*) => {
+        if log_enabled!(Level::Info) {
+            println!($($tts)*);
+        }
+    }
+}
+
+macro_rules! info_print {
+    ($($tts:tt)*) => {
+        if log_enabled!(Level::Info) {
+            print!($($tts)*);
+        }
+    }
+}
+
+/// Initialize `env_logger` using either `default_level` or
 /// `RUST_LOG` environment variable as default log level.
-///
-/// OpenTelemetry is configured with OTLP/HTTP exporter. It picks up
-/// configuration from environment variables. For example, to change the destination,
-/// set `OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4318`. See
-/// `tracing-utils` package description.
-///
-pub fn init_tracing_and_logging(default_log_level: &str) -> anyhow::Result<()> {
-    // Initialize Logging
-    let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
-        .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));
+pub fn init_logger(default_level: &str) -> Result<()> {
+    let env = Env::default().filter_or("RUST_LOG", default_level);
 
-    let fmt_layer = tracing_subscriber::fmt::layer()
-        .with_target(false)
-        .with_writer(std::io::stderr);
-
-    // Initialize OpenTelemetry
-    let otlp_layer =
-        tracing_utils::init_tracing_without_runtime("compute_ctl").map(OpenTelemetryLayer::new);
-
-    // Put it all together
-    tracing_subscriber::registry()
-        .with(env_filter)
-        .with(otlp_layer)
-        .with(fmt_layer)
+    Builder::from_env(env)
+        .format(|buf, record| {
+            let thread_handle = std::thread::current();
+            writeln!(
+                buf,
+                "{} [{}] {}: {}",
+                Utc::now().format("%Y-%m-%d %H:%M:%S%.3f %Z"),
+                thread_handle.name().unwrap_or("main"),
+                record.level(),
+                record.args()
+            )
+        })
         .init();
-    tracing::info!("logging and tracing started");
 
     Ok(())
 }
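Note (self-contained sketch, assuming the `env_logger` and `log` crates): the `filter_or` call in the new `init_logger` means the caller-supplied default level is used only when `RUST_LOG` is unset, which is the behaviour the rest of this patch relies on:

    use env_logger::{Builder, Env};

    fn main() {
        // Falls back to "info" unless RUST_LOG is set in the environment.
        let env = Env::default().filter_or("RUST_LOG", "info");
        Builder::from_env(env).init();

        log::info!("visible at the default level");
        log::debug!("only visible with RUST_LOG=debug (or lower)");
    }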
@@ -3,8 +3,8 @@ use std::{thread, time};
 
 use anyhow::Result;
 use chrono::{DateTime, Utc};
+use log::{debug, info};
 use postgres::{Client, NoTls};
-use tracing::{debug, info};
 
 use crate::compute::ComputeNode;
 
@@ -52,16 +52,10 @@ fn watch_compute_activity(compute: &ComputeNode) {
             let mut idle_backs: Vec<DateTime<Utc>> = vec![];
 
             for b in backs.into_iter() {
-                let state: String = match b.try_get("state") {
-                    Ok(state) => state,
-                    Err(_) => continue,
-                };
+                let state: String = b.get("state");
+                let change: String = b.get("state_change");
 
                 if state == "idle" {
-                    let change: String = match b.try_get("state_change") {
-                        Ok(state_change) => state_change,
-                        Err(_) => continue,
-                    };
                     let change = DateTime::parse_from_rfc3339(&change);
                     match change {
                         Ok(t) => idle_backs.push(t.with_timezone(&Utc)),
@@ -80,8 +74,10 @@ fn watch_compute_activity(compute: &ComputeNode) {
                 }
             }
 
-            // Get idle backend `state_change` with the max timestamp.
-            if let Some(last) = idle_backs.iter().max() {
+            // Sort idle backend `state_change` timestamps. The last one corresponds
+            // to the last activity.
+            idle_backs.sort();
+            if let Some(last) = idle_backs.last() {
                 last_active = *last;
             }
         }
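Note (sketch, assuming the chrono crate): the two variants in the last hunk compute the same value, the most recent `state_change` among idle backends; sorting and taking the last element is equivalent to `iter().max()` here:

    use chrono::{TimeZone, Utc};

    fn main() {
        let mut idle_backs = vec![
            Utc.ymd(2022, 10, 12).and_hms(7, 20, 50),
            Utc.ymd(2022, 10, 12).and_hms(9, 0, 0),
            Utc.ymd(2022, 10, 12).and_hms(8, 30, 0),
        ];

        let by_max = idle_backs.iter().max().copied();
        idle_backs.sort();
        let by_sort = idle_backs.last().copied();

        assert_eq!(by_max, by_sort);
        println!("last activity: {by_sort:?}");
    }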
@@ -1,9 +1,3 @@
 pub const DEFAULT_LOG_LEVEL: &str = "info";
-// From Postgres docs:
-// To ease transition from the md5 method to the newer SCRAM method, if md5 is specified
-// as a method in pg_hba.conf but the user's password on the server is encrypted for SCRAM
-// (see below), then SCRAM-based authentication will automatically be chosen instead.
-// https://www.postgresql.org/docs/15/auth-password.html
-//
-// So it's safe to set md5 here, as `control-plane` anyway uses SCRAM for all roles.
+pub const DEFAULT_CONNSTRING: &str = "host=localhost user=postgres";
 pub const PG_HBA_ALL_MD5: &str = "host\tall\t\tall\t\t0.0.0.0/0\t\tmd5";
@@ -1,19 +1,18 @@
 use std::fmt::Write;
-use std::fs;
 use std::fs::File;
 use std::io::{BufRead, BufReader};
+use std::net::{SocketAddr, TcpStream};
 use std::os::unix::fs::PermissionsExt;
 use std::path::Path;
 use std::process::Child;
-use std::time::{Duration, Instant};
+use std::str::FromStr;
+use std::{fs, thread, time};
 
 use anyhow::{bail, Result};
-use notify::{RecursiveMode, Watcher};
 use postgres::{Client, Transaction};
 use serde::Deserialize;
-use tracing::{debug, instrument};
 
-const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds
+const POSTGRES_WAIT_TIMEOUT: u64 = 60 * 1000; // milliseconds
 
 /// Rust representation of Postgres role info with only those fields
 /// that matter for us.
@@ -66,7 +65,7 @@ impl GenericOption {
         let name = match self.name.as_str() {
             "safekeepers" => "neon.safekeepers",
             "wal_acceptor_reconnect" => "neon.safekeeper_reconnect_timeout",
-            "wal_acceptor_connection_timeout" => "neon.safekeeper_connection_timeout",
+            "wal_acceptor_connect_timeout" => "neon.safekeeper_connect_timeout",
             it => it,
         };
 
@@ -120,9 +119,16 @@ pub trait GenericOptionsSearch {
 impl GenericOptionsSearch for GenericOptions {
     /// Lookup option by name
     fn find(&self, name: &str) -> Option<String> {
-        let ops = self.as_ref()?;
-        let op = ops.iter().find(|s| s.name == name)?;
-        op.value.clone()
+        match &self {
+            Some(ops) => {
+                let op = ops.iter().find(|s| s.name == name);
+                match op {
+                    Some(op) => op.value.clone(),
+                    None => None,
+                }
+            }
+            None => None,
+        }
     }
 }
 
@@ -130,8 +136,8 @@ impl Role {
     /// Serialize a list of role parameters into a Postgres-acceptable
     /// string of arguments.
     pub fn to_pg_options(&self) -> String {
-        // XXX: consider putting LOGIN as a default option somewhere higher, e.g. in control-plane.
-        // For now, we do not use generic `options` for roles. Once used, add
+        // XXX: consider putting LOGIN as a default option somewhere higher, e.g. in Rails.
+        // For now we do not use generic `options` for roles. Once used, add
         // `self.options.as_pg_options()` somewhere here.
         let mut params: String = "LOGIN".to_string();
 
@@ -155,14 +161,6 @@ impl Role {
 }
 
 impl Database {
-    pub fn new(name: PgIdent, owner: PgIdent) -> Self {
-        Self {
-            name,
-            owner,
-            options: None,
-        }
-    }
-
     /// Serialize a list of database parameters into a Postgres-acceptable
     /// string of arguments.
     /// NB: `TEMPLATE` is actually also an identifier, but so far we only need
@@ -170,7 +168,7 @@ impl Database {
     /// it may require a proper quoting too.
     pub fn to_pg_options(&self) -> String {
         let mut params: String = self.options.as_pg_options();
-        write!(params, " OWNER {}", &self.owner.pg_quote())
+        write!(params, " OWNER {}", &self.owner.quote())
             .expect("String is documented to not to error during write operations");
 
         params
@@ -181,17 +179,18 @@ impl Database {
 /// intended to be used for DB / role names.
 pub type PgIdent = String;
 
-/// Generic trait used to provide quoting / encoding for strings used in the
-/// Postgres SQL queries and DATABASE_URL.
-pub trait Escaping {
-    fn pg_quote(&self) -> String;
+/// Generic trait used to provide quoting for strings used in the
+/// Postgres SQL queries. Currently used only to implement quoting
+/// of identifiers, but could be used for literals in the future.
+pub trait PgQuote {
+    fn quote(&self) -> String;
 }
 
-impl Escaping for PgIdent {
+impl PgQuote for PgIdent {
     /// This is intended to mimic Postgres quote_ident(), but for simplicity it
-    /// always quotes provided string with `""` and escapes every `"`.
-    /// **Not idempotent**, i.e. if string is already escaped it will be escaped again.
-    fn pg_quote(&self) -> String {
+    /// always quotes provided string with `""` and escapes every `"`. Not idempotent,
+    /// i.e. if string is already escaped it will be escaped again.
+    fn quote(&self) -> String {
         let result = format!("\"{}\"", self.replace('"', "\"\""));
         result
     }
@@ -221,119 +220,62 @@ pub fn get_existing_dbs(client: &mut Client) -> Result<Vec<Database>> {
             &[],
         )?
         .iter()
-        .map(|row| Database::new(row.get("datname"), row.get("owner")))
+        .map(|row| Database {
+            name: row.get("datname"),
+            owner: row.get("owner"),
+            options: None,
+        })
        .collect();
 
     Ok(postgres_dbs)
 }
 
-/// Wait for Postgres to become ready to accept connections. It's ready to
-/// accept connections when the state-field in `pgdata/postmaster.pid` says
-/// 'ready'.
-#[instrument(skip(pg))]
-pub fn wait_for_postgres(pg: &mut Child, pgdata: &Path) -> Result<()> {
+/// Wait for Postgres to become ready to accept connections:
+/// - state should be `ready` in the `pgdata/postmaster.pid`
+/// - and we should be able to connect to 127.0.0.1:5432
+pub fn wait_for_postgres(pg: &mut Child, port: &str, pgdata: &Path) -> Result<()> {
     let pid_path = pgdata.join("postmaster.pid");
+    let mut slept: u64 = 0; // ms
+    let pause = time::Duration::from_millis(100);
 
-    // PostgreSQL writes line "ready" to the postmaster.pid file, when it has
-    // completed initialization and is ready to accept connections. We want to
-    // react quickly and perform the rest of our initialization as soon as
-    // PostgreSQL starts accepting connections. Use 'notify' to be notified
-    // whenever the PID file is changed, and whenever it changes, read it to
-    // check if it's now "ready".
-    //
-    // You cannot actually watch a file before it exists, so we first watch the
-    // data directory, and once the postmaster.pid file appears, we switch to
-    // watch the file instead. We also wake up every 100 ms to poll, just in
-    // case we miss some events for some reason. Not strictly necessary, but
-    // better safe than sorry.
-    let (tx, rx) = std::sync::mpsc::channel();
-    let (mut watcher, rx): (Box<dyn Watcher>, _) = match notify::recommended_watcher(move |res| {
-        let _ = tx.send(res);
-    }) {
-        Ok(watcher) => (Box::new(watcher), rx),
-        Err(e) => {
-            match e.kind {
-                notify::ErrorKind::Io(os) if os.raw_os_error() == Some(38) => {
-                    // docker on m1 macs does not support recommended_watcher
-                    // but return "Function not implemented (os error 38)"
-                    // see https://github.com/notify-rs/notify/issues/423
-                    let (tx, rx) = std::sync::mpsc::channel();
-
-                    // let's poll it faster than what we check the results for (100ms)
-                    let config =
-                        notify::Config::default().with_poll_interval(Duration::from_millis(50));
-
-                    let watcher = notify::PollWatcher::new(
-                        move |res| {
-                            let _ = tx.send(res);
-                        },
-                        config,
-                    )?;
-
-                    (Box::new(watcher), rx)
-                }
-                _ => return Err(e.into()),
-            }
-        }
-    };
-
-    watcher.watch(pgdata, RecursiveMode::NonRecursive)?;
-
-    let started_at = Instant::now();
-    let mut postmaster_pid_seen = false;
+    let timeout = time::Duration::from_millis(10);
+    let addr = SocketAddr::from_str(&format!("127.0.0.1:{}", port)).unwrap();
     loop {
+        // Sleep POSTGRES_WAIT_TIMEOUT at max (a bit longer actually if consider a TCP timeout,
+        // but postgres starts listening almost immediately, even if it is not really
+        // ready to accept connections).
+        if slept >= POSTGRES_WAIT_TIMEOUT {
+            bail!("timed out while waiting for Postgres to start");
+        }
+
         if let Ok(Some(status)) = pg.try_wait() {
             // Postgres exited, that is not what we expected, bail out earlier.
             let code = status.code().unwrap_or(-1);
             bail!("Postgres exited unexpectedly with code {}", code);
         }
 
-        let res = rx.recv_timeout(Duration::from_millis(100));
-        debug!("woken up by notify: {res:?}");
-        // If there are multiple events in the channel already, we only need to be
-        // check once. Swallow the extra events before we go ahead to check the
-        // pid file.
-        while let Ok(res) = rx.try_recv() {
-            debug!("swallowing extra event: {res:?}");
-        }
-
         // Check that we can open pid file first.
         if let Ok(file) = File::open(&pid_path) {
-            if !postmaster_pid_seen {
-                debug!("postmaster.pid appeared");
-                watcher
-                    .unwatch(pgdata)
-                    .expect("Failed to remove pgdata dir watch");
-                watcher
-                    .watch(&pid_path, RecursiveMode::NonRecursive)
-                    .expect("Failed to add postmaster.pid file watch");
-                postmaster_pid_seen = true;
-            }
-
             let file = BufReader::new(file);
             let last_line = file.lines().last();
 
             // Pid file could be there and we could read it, but it could be empty, for example.
             if let Some(Ok(line)) = last_line {
                 let status = line.trim();
-                debug!("last line of postmaster.pid: {status:?}");
+                let can_connect = TcpStream::connect_timeout(&addr, timeout).is_ok();
 
                 // Now Postgres is ready to accept connections
-                if status == "ready" {
+                if status == "ready" && can_connect {
                     break;
                 }
             }
         }
 
-        // Give up after POSTGRES_WAIT_TIMEOUT.
-        let duration = started_at.elapsed();
-        if duration >= POSTGRES_WAIT_TIMEOUT {
-            bail!("timed out while waiting for Postgres to start");
-        }
+        thread::sleep(pause);
+        slept += 100;
     }
 
-    tracing::info!("PostgreSQL is now running, continuing to configure it");
-
     Ok(())
 }
 
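Note (self-contained sketch of the quoting rule that both `pg_quote` and `quote` implement above): wrap the identifier in double quotes and double every embedded double quote; the operation is intentionally not idempotent, so it must be applied exactly once:

    fn pg_quote(ident: &str) -> String {
        format!("\"{}\"", ident.replace('"', "\"\""))
    }

    fn main() {
        assert_eq!(pg_quote("web_access"), "\"web_access\"");
        assert_eq!(pg_quote("we\"ird"), "\"we\"\"ird\"");
        // Quoting twice escapes again, which is why callers quote exactly once.
        assert_eq!(pg_quote(&pg_quote("x")), "\"\"\"x\"\"\"");
    }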
@@ -1,12 +1,9 @@
-use std::collections::HashMap;
 use std::path::Path;
-use std::str::FromStr;
 
 use anyhow::Result;
-use postgres::config::Config;
+use log::{info, log_enabled, warn, Level};
 use postgres::{Client, NoTls};
 use serde::Deserialize;
-use tracing::{info, info_span, instrument, span_enabled, warn, Level};
 
 use crate::compute::ComputeNode;
 use crate::config;
@@ -23,8 +20,6 @@ pub struct ComputeSpec {
     /// Expected cluster state at the end of transition process.
     pub cluster: Cluster,
     pub delta_operations: Option<Vec<DeltaOp>>,
-
-    pub startup_tracing_context: Option<HashMap<String, String>>,
 }
 
 /// Cluster state seen from the perspective of the external tools
@@ -82,25 +77,23 @@ pub fn update_pg_hba(pgdata_path: &Path) -> Result<()> {
 
 /// Given a cluster spec json and open transaction it handles roles creation,
 /// deletion and update.
-#[instrument(skip_all)]
 pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
     let mut xact = client.transaction()?;
     let existing_roles: Vec<Role> = get_existing_roles(&mut xact)?;
 
     // Print a list of existing Postgres roles (only in debug mode)
-    if span_enabled!(Level::INFO) {
-        info!("postgres roles:");
-        for r in &existing_roles {
-            info!(
-                " - {}:{}",
-                r.name,
-                if r.encrypted_password.is_some() {
-                    "[FILTERED]"
-                } else {
-                    "(null)"
-                }
-            );
-        }
+    info!("postgres roles:");
+    for r in &existing_roles {
+        info_println!(
+            "{} - {}:{}",
+            " ".repeat(27 + 5),
+            r.name,
+            if r.encrypted_password.is_some() {
+                "[FILTERED]"
+            } else {
+                "(null)"
+            }
+        );
     }
 
     // Process delta operations first
@@ -122,8 +115,8 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
             if existing_roles.iter().any(|r| r.name == op.name) {
                 let query: String = format!(
                     "ALTER ROLE {} RENAME TO {}",
-                    op.name.pg_quote(),
-                    new_name.pg_quote()
+                    op.name.quote(),
+                    new_name.quote()
                 );
 
                 warn!("renaming role '{}' to '{}'", op.name, new_name);
@@ -141,80 +134,58 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
     info!("cluster spec roles:");
     for role in &spec.cluster.roles {
         let name = &role.name;
-        // XXX: with a limited number of roles it is fine, but consider making it a HashMap
-        let pg_role = existing_roles.iter().find(|r| r.name == *name);
 
-        enum RoleAction {
-            None,
-            Update,
-            Create,
-        }
-        let action = if let Some(r) = pg_role {
-            if (r.encrypted_password.is_none() && role.encrypted_password.is_some())
-                || (r.encrypted_password.is_some() && role.encrypted_password.is_none())
-            {
-                RoleAction::Update
-            } else if let Some(pg_pwd) = &r.encrypted_password {
-                // Check whether password changed or not (trim 'md5' prefix first if any)
-                //
-                // This is a backward compatibility hack, which comes from the times when we were using
-                // md5 for everyone and hashes were stored in the console db without md5 prefix. So when
-                // role comes from the control-plane (json spec) `Role.encrypted_password` doesn't have md5 prefix,
-                // but when role comes from Postgres (`get_existing_roles` / `existing_roles`) it has this prefix.
-                // Here is the only place so far where we compare hashes, so it seems to be the best candidate
-                // to place this compatibility layer.
-                let pg_pwd = if let Some(stripped) = pg_pwd.strip_prefix("md5") {
-                    stripped
-                } else {
-                    pg_pwd
-                };
-                if pg_pwd != *role.encrypted_password.as_ref().unwrap() {
-                    RoleAction::Update
-                } else {
-                    RoleAction::None
-                }
-            } else {
-                RoleAction::None
-            }
-        } else {
-            RoleAction::Create
-        };
-
-        match action {
-            RoleAction::None => {}
-            RoleAction::Update => {
-                let mut query: String = format!("ALTER ROLE {} ", name.pg_quote());
-                query.push_str(&role.to_pg_options());
-                xact.execute(query.as_str(), &[])?;
-            }
-            RoleAction::Create => {
-                let mut query: String = format!("CREATE ROLE {} ", name.pg_quote());
-                info!("role create query: '{}'", &query);
-                query.push_str(&role.to_pg_options());
-                xact.execute(query.as_str(), &[])?;
-
-                let grant_query = format!(
-                    "GRANT pg_read_all_data, pg_write_all_data TO {}",
-                    name.pg_quote()
-                );
-                xact.execute(grant_query.as_str(), &[])?;
-                info!("role grant query: '{}'", &grant_query);
-            }
-        }
-
-        if span_enabled!(Level::INFO) {
-            let pwd = if role.encrypted_password.is_some() {
+        info_print!(
+            "{} - {}:{}",
+            " ".repeat(27 + 5),
+            name,
+            if role.encrypted_password.is_some() {
                 "[FILTERED]"
             } else {
                 "(null)"
-            };
-            let action_str = match action {
-                RoleAction::None => "",
-                RoleAction::Create => " -> create",
-                RoleAction::Update => " -> update",
-            };
-            info!(" - {}:{}{}", name, pwd, action_str);
+            }
+        );
+
+        // XXX: with a limited number of roles it is fine, but consider making it a HashMap
+        let pg_role = existing_roles.iter().find(|r| r.name == *name);
+
+        if let Some(r) = pg_role {
+            let mut update_role = false;
+
+            if (r.encrypted_password.is_none() && role.encrypted_password.is_some())
+                || (r.encrypted_password.is_some() && role.encrypted_password.is_none())
+            {
+                update_role = true;
+            } else if let Some(pg_pwd) = &r.encrypted_password {
+                // Check whether password changed or not (trim 'md5:' prefix first)
+                update_role = pg_pwd[3..] != *role.encrypted_password.as_ref().unwrap();
+            }
+
+            if update_role {
+                let mut query: String = format!("ALTER ROLE {} ", name.quote());
+                info_print!(" -> update");
+
+                query.push_str(&role.to_pg_options());
+                xact.execute(query.as_str(), &[])?;
+            }
+        } else {
+            info!("role name: '{}'", &name);
+            let mut query: String = format!("CREATE ROLE {} ", name.quote());
+            info!("role create query: '{}'", &query);
+            info_print!(" -> create");
+
+            query.push_str(&role.to_pg_options());
+            xact.execute(query.as_str(), &[])?;
+
+            let grant_query = format!(
+                "GRANT pg_read_all_data, pg_write_all_data TO {}",
+                name.quote()
+            );
+            xact.execute(grant_query.as_str(), &[])?;
+            info!("role grant query: '{}'", &grant_query);
         }
+
+        info_print!("\n");
     }
 
     xact.commit()?;
@@ -223,43 +194,33 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
 }
 
 /// Reassign all dependent objects and delete requested roles.
-#[instrument(skip_all)]
 pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<()> {
-    if let Some(ops) = &node.spec.delta_operations {
-        // First, reassign all dependent objects to db owners.
+    let spec = &node.spec;
+
+    // First, reassign all dependent objects to db owners.
+    if let Some(ops) = &spec.delta_operations {
         info!("reassigning dependent objects of to-be-deleted roles");
 
-        // Fetch existing roles. We could've exported and used `existing_roles` from
-        // `handle_roles()`, but we only make this list there before creating new roles.
-        // Which is probably fine as we never create to-be-deleted roles, but that'd
-        // just look a bit untidy. Anyway, the entire `pg_roles` should be in shared
-        // buffers already, so this shouldn't be a big deal.
-        let mut xact = client.transaction()?;
-        let existing_roles: Vec<Role> = get_existing_roles(&mut xact)?;
-        xact.commit()?;
-
         for op in ops {
-            // Check that role is still present in Postgres, as this could be a
-            // restart with the same spec after role deletion.
-            if op.action == "delete_role" && existing_roles.iter().any(|r| r.name == op.name) {
+            if op.action == "delete_role" {
                 reassign_owned_objects(node, &op.name)?;
             }
         }
+    }
 
-        // Second, proceed with role deletions.
+    // Second, proceed with role deletions.
+    let mut xact = client.transaction()?;
+    if let Some(ops) = &spec.delta_operations {
         info!("processing role deletions");
-        let mut xact = client.transaction()?;
         for op in ops {
             // We do not check either role exists or not,
             // Postgres will take care of it for us
             if op.action == "delete_role" {
-                let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.pg_quote());
+                let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.quote());
 
                 warn!("deleting role '{}'", &op.name);
                 xact.execute(query.as_str(), &[])?;
             }
         }
-        xact.commit()?;
     }
 
     Ok(())
@@ -269,16 +230,17 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
 fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()> {
     for db in &node.spec.cluster.databases {
         if db.owner != *role_name {
-            let mut conf = Config::from_str(node.connstr.as_str())?;
-            conf.dbname(&db.name);
+            let mut connstr = node.connstr.clone();
+            // database name is always the last and the only component of the path
+            connstr.set_path(&db.name);
 
-            let mut client = conf.connect(NoTls)?;
+            let mut client = Client::connect(connstr.as_str(), NoTls)?;
 
             // This will reassign all dependent objects to the db owner
             let reassign_query = format!(
                 "REASSIGN OWNED BY {} TO {}",
-                role_name.pg_quote(),
-                db.owner.pg_quote()
+                role_name.quote(),
+                db.owner.quote()
             );
             info!(
                 "reassigning objects owned by '{}' in db '{}' to '{}'",
@@ -287,7 +249,7 @@ fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()>
             client.simple_query(&reassign_query)?;
 
             // This now will only drop privileges of the role
-            let drop_query = format!("DROP OWNED BY {}", role_name.pg_quote());
+            let drop_query = format!("DROP OWNED BY {}", role_name.quote());
             client.simple_query(&drop_query)?;
         }
     }
@@ -300,16 +262,13 @@ fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()>
 /// like `CREATE DATABASE` and `DROP DATABASE` do not support it. Statement-level
 /// atomicity should be enough here due to the order of operations and various checks,
 /// which together provide us idempotency.
-#[instrument(skip_all)]
 pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
     let existing_dbs: Vec<Database> = get_existing_dbs(client)?;
 
     // Print a list of existing Postgres databases (only in debug mode)
-    if span_enabled!(Level::INFO) {
-        info!("postgres databases:");
-        for r in &existing_dbs {
-            info!(" {}:{}", r.name, r.owner);
-        }
+    info!("postgres databases:");
+    for r in &existing_dbs {
+        info_println!("{} - {}:{}", " ".repeat(27 + 5), r.name, r.owner);
     }
 
     // Process delta operations first
@@ -320,7 +279,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
             // We do not check either DB exists or not,
             // Postgres will take care of it for us
             "delete_db" => {
                let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.pg_quote());
-                let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.pg_quote());
+                let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.quote());
 
                 warn!("deleting database '{}'", &op.name);
                 client.execute(query.as_str(), &[])?;
@@ -332,8 +291,8 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
                 if existing_dbs.iter().any(|r| r.name == op.name) {
                     let query: String = format!(
                         "ALTER DATABASE {} RENAME TO {}",
-                        op.name.pg_quote(),
-                        new_name.pg_quote()
+                        op.name.quote(),
+                        new_name.quote()
                     );
 
                     warn!("renaming database '{}' to '{}'", op.name, new_name);
@@ -352,60 +311,39 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
     for db in &spec.cluster.databases {
         let name = &db.name;
 
+        info_print!("{} - {}:{}", " ".repeat(27 + 5), db.name, db.owner);
+
         // XXX: with a limited number of databases it is fine, but consider making it a HashMap
         let pg_db = existing_dbs.iter().find(|r| r.name == *name);
 
-        enum DatabaseAction {
-            None,
-            Update,
-            Create,
-        }
-        let action = if let Some(r) = pg_db {
+        if let Some(r) = pg_db {
             // XXX: db owner name is returned as quoted string from Postgres,
             // when quoting is needed.
             let new_owner = if r.owner.starts_with('"') {
-                db.owner.pg_quote()
+                db.owner.quote()
             } else {
                 db.owner.clone()
             };
 
             if new_owner != r.owner {
-                // Update the owner
-                DatabaseAction::Update
-            } else {
-                DatabaseAction::None
-            }
-        } else {
-            DatabaseAction::Create
-        };
-
-        match action {
-            DatabaseAction::None => {}
-            DatabaseAction::Update => {
                 let query: String = format!(
                     "ALTER DATABASE {} OWNER TO {}",
-                    name.pg_quote(),
-                    db.owner.pg_quote()
+                    name.quote(),
+                    db.owner.quote()
                 );
-                let _guard = info_span!("executing", query).entered();
+                info_print!(" -> update");
                 client.execute(query.as_str(), &[])?;
             }
-            DatabaseAction::Create => {
-                let mut query: String = format!("CREATE DATABASE {} ", name.pg_quote());
-                query.push_str(&db.to_pg_options());
-                let _guard = info_span!("executing", query).entered();
-                client.execute(query.as_str(), &[])?;
-            }
-        };
-
-        if span_enabled!(Level::INFO) {
-            let action_str = match action {
-                DatabaseAction::None => "",
-                DatabaseAction::Create => " -> create",
-                DatabaseAction::Update => " -> update",
-            };
-            info!(" - {}:{}{}", db.name, db.owner, action_str);
+        } else {
+            let mut query: String = format!("CREATE DATABASE {} ", name.quote());
+            info_print!(" -> create");
+            query.push_str(&db.to_pg_options());
+            client.execute(query.as_str(), &[])?;
         }
+
+        info_print!("\n");
     }
 
     Ok(())
@@ -413,7 +351,6 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
 
 /// Grant CREATE ON DATABASE to the database owner and do some other alters and grants
 /// to allow users creating trusted extensions and re-creating `public` schema, for example.
-#[instrument(skip_all)]
 pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
     let spec = &node.spec;
 
@@ -429,7 +366,7 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
         .cluster
         .roles
         .iter()
-        .map(|r| r.name.pg_quote())
+        .map(|r| r.name.quote())
         .collect::<Vec<_>>();
 
     for db in &spec.cluster.databases {
@@ -437,7 +374,7 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
 
         let query: String = format!(
             "GRANT CREATE ON DATABASE {} TO {}",
-            dbname.pg_quote(),
+            dbname.quote(),
             roles.join(", ")
         );
         info!("grant query {}", &query);
@@ -448,11 +385,12 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
     // Do some per-database access adjustments. We'd better do this at db creation time,
     // but CREATE DATABASE isn't transactional. So we cannot create db + do some grants
    // atomically.
+    let mut db_connstr = node.connstr.clone();
     for db in &node.spec.cluster.databases {
-        let mut conf = Config::from_str(node.connstr.as_str())?;
-        conf.dbname(&db.name);
+        // database name is always the last and the only component of the path
+        db_connstr.set_path(&db.name);
 
-        let mut db_client = conf.connect(NoTls)?;
+        let mut db_client = Client::connect(db_connstr.as_str(), NoTls)?;
 
         // This will only change ownership on the schema itself, not the objects
         // inside it. Without it owner of the `public` schema will be `cloud_admin`
@@ -481,36 +419,9 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
                 END IF;\n\
             END\n\
             $$;",
-            db.owner.pg_quote()
+            db.owner.quote()
         );
         db_client.simple_query(&alter_query)?;
-
-        // Explicitly grant CREATE ON SCHEMA PUBLIC to the web_access user.
-        // This is needed because since postgres 15 this privilege is removed by default.
-        let grant_query = "DO $$\n\
-            BEGIN\n\
-                IF EXISTS(\n\
-                    SELECT nspname\n\
-                    FROM pg_catalog.pg_namespace\n\
-                    WHERE nspname = 'public'\n\
-                ) AND\n\
-                current_setting('server_version_num')::int/10000 >= 15\n\
-                THEN\n\
-                IF EXISTS(\n\
-                    SELECT rolname\n\
-                    FROM pg_catalog.pg_roles\n\
-                    WHERE rolname = 'web_access'\n\
-                )\n\
-                THEN\n\
-                    GRANT CREATE ON SCHEMA public TO web_access;\n\
-                END IF;\n\
-                END IF;\n\
-            END\n\
-            $$;"
-        .to_string();
-
-        info!("grant query for db {} : {}", &db.name, &grant_query);
-        db_client.simple_query(&grant_query)?;
     }
 
     Ok(())
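Note (sketch of the compatibility comparison described in the removed comment in the roles hunk; the helper name is hypothetical): password hashes read back from Postgres carry an "md5" prefix while hashes in the spec do not, so the prefix is stripped before comparing:

    fn password_changed(pg_pwd: &str, spec_pwd: &str) -> bool {
        // Strip the "md5" prefix if present, then compare the bare hashes.
        let pg_pwd = pg_pwd.strip_prefix("md5").unwrap_or(pg_pwd);
        pg_pwd != spec_pwd
    }

    fn main() {
        assert!(!password_changed("md5abc123", "abc123"));
        assert!(password_changed("md5abc123", "def456"));
    }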
@@ -33,38 +33,9 @@ mod pg_helpers_tests {
     }
 
     #[test]
-    fn ident_pg_quote() {
+    fn quote_ident() {
         let ident: PgIdent = PgIdent::from("\"name\";\\n select 1;");
 
-        assert_eq!(ident.pg_quote(), "\"\"\"name\"\";\\n select 1;\"");
-    }
-
-    #[test]
-    fn generic_options_search() {
-        let generic_options: GenericOptions = Some(vec![
-            GenericOption {
-                name: "present_value".into(),
-                value: Some("value".into()),
-                vartype: "string".into(),
-            },
-            GenericOption {
-                name: "missed_value".into(),
-                value: None,
-                vartype: "int".into(),
-            },
-        ]);
-        assert_eq!(generic_options.find("present_value"), Some("value".into()));
-        assert_eq!(generic_options.find("missed_value"), None);
-        assert_eq!(generic_options.find("invalid_value"), None);
-
-        let empty_generic_options: GenericOptions = Some(vec![]);
-        assert_eq!(empty_generic_options.find("present_value"), None);
-        assert_eq!(empty_generic_options.find("missed_value"), None);
-        assert_eq!(empty_generic_options.find("invalid_value"), None);
-
-        let none_generic_options: GenericOptions = None;
-        assert_eq!(none_generic_options.find("present_value"), None);
-        assert_eq!(none_generic_options.find("missed_value"), None);
-        assert_eq!(none_generic_options.find("invalid_value"), None);
+        assert_eq!(ident.quote(), "\"\"\"name\"\";\\n select 1;\"");
     }
 }
@@ -1,31 +1,25 @@
 [package]
 name = "control_plane"
 version = "0.1.0"
-edition.workspace = true
-license.workspace = true
+edition = "2021"
 
 [dependencies]
-anyhow.workspace = true
-clap.workspace = true
-comfy-table.workspace = true
-git-version.workspace = true
-nix.workspace = true
-once_cell.workspace = true
-postgres.workspace = true
-regex.workspace = true
-reqwest = { workspace = true, features = ["blocking", "json"] }
-serde.workspace = true
-serde_with.workspace = true
-tar.workspace = true
-thiserror.workspace = true
-toml.workspace = true
-url.workspace = true
-# Note: Do not directly depend on pageserver or safekeeper; use pageserver_api or safekeeper_api
-# instead, so that recompile times are better.
-pageserver_api.workspace = true
-safekeeper_api.workspace = true
-postgres_connection.workspace = true
-storage_broker.workspace = true
-utils.workspace = true
-
-workspace_hack.workspace = true
+clap = "3.0"
+comfy-table = "5.0.1"
+git-version = "0.3.5"
+tar = "0.4.38"
+postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
+serde = { version = "1.0", features = ["derive"] }
+serde_with = "1.12.0"
+toml = "0.5"
+once_cell = "1.13.0"
+regex = "1"
+anyhow = "1.0"
+thiserror = "1"
+nix = "0.23"
+reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] }
+
+pageserver = { path = "../pageserver" }
+safekeeper = { path = "../safekeeper" }
+utils = { path = "../libs/utils" }
+workspace_hack = { version = "0.1", path = "../workspace_hack" }
@@ -10,5 +10,5 @@ id = 1
 pg_port = 5454
 http_port = 7676
 
-[broker]
-listen_addr = '127.0.0.1:50051'
+[etcd_broker]
+broker_endpoints = ['http://127.0.0.1:2379']
@@ -1,337 +0,0 @@
-//! Spawns and kills background processes that are needed by Neon CLI.
-//! Applies common set-up such as log and pid files (if needed) to every process.
-//!
-//! Neon CLI does not run in background, so it needs to store the information about
-//! spawned processes, which it does in this module.
-//! We do that by storing the pid of the process in the "${process_name}.pid" file.
-//! The pid file can be created by the process itself
-//! (Neon storage binaries do that and also ensure that a lock is taken onto that file)
-//! or we create such file after starting the process
-//! (non-Neon binaries don't necessarily follow our pidfile conventions).
-//! The pid stored in the file is later used to stop the service.
-//!
-//! See [`lock_file`] module for more info.
-
-use std::ffi::OsStr;
-use std::io::Write;
-use std::os::unix::prelude::AsRawFd;
-use std::os::unix::process::CommandExt;
-use std::path::{Path, PathBuf};
-use std::process::{Child, Command};
-use std::time::Duration;
-use std::{fs, io, thread};
-
-use anyhow::Context;
-use nix::errno::Errno;
-use nix::fcntl::{FcntlArg, FdFlag};
-use nix::sys::signal::{kill, Signal};
-use nix::unistd::Pid;
-use utils::pid_file::{self, PidFileRead};
-
-// These constants control the loop used to poll for process start / stop.
-//
-// The loop waits for at most 10 seconds, polling every 100 ms.
-// Once a second, it prints a dot ("."), to give the user an indication that
-// it's waiting. If the process hasn't started/stopped after 5 seconds,
-// it prints a notice that it's taking long, but keeps waiting.
-//
-const RETRY_UNTIL_SECS: u64 = 10;
-const RETRIES: u64 = (RETRY_UNTIL_SECS * 1000) / RETRY_INTERVAL_MILLIS;
-const RETRY_INTERVAL_MILLIS: u64 = 100;
-const DOT_EVERY_RETRIES: u64 = 10;
-const NOTICE_AFTER_RETRIES: u64 = 50;
-
-/// Argument to `start_process`, to indicate whether it should create pidfile or if the process creates
-/// it itself.
-pub enum InitialPidFile<'t> {
-    /// Create a pidfile, to allow future CLI invocations to manipulate the process.
-    Create(&'t Path),
-    /// The process will create the pidfile itself, need to wait for that event.
-    Expect(&'t Path),
-}
-
-/// Start a background child process using the parameters given.
-pub fn start_process<F, AI, A, EI>(
-    process_name: &str,
-    datadir: &Path,
-    command: &Path,
-    args: AI,
-    envs: EI,
-    initial_pid_file: InitialPidFile,
-    process_status_check: F,
-) -> anyhow::Result<Child>
-where
-    F: Fn() -> anyhow::Result<bool>,
-    AI: IntoIterator<Item = A>,
-    A: AsRef<OsStr>,
-    // Not generic AsRef<OsStr>, otherwise empty `envs` prevents type inference
-    EI: IntoIterator<Item = (String, String)>,
-{
-    let log_path = datadir.join(format!("{process_name}.log"));
-    let process_log_file = fs::OpenOptions::new()
-        .create(true)
-        .write(true)
-        .append(true)
-        .open(&log_path)
-        .with_context(|| {
-            format!("Could not open {process_name} log file {log_path:?} for writing")
-        })?;
-    let same_file_for_stderr = process_log_file.try_clone().with_context(|| {
-        format!("Could not reuse {process_name} log file {log_path:?} for writing stderr")
-    })?;
-
-    let mut command = Command::new(command);
-    let background_command = command
-        .stdout(process_log_file)
-        .stderr(same_file_for_stderr)
-        .args(args);
-    let filled_cmd = fill_aws_secrets_vars(fill_rust_env_vars(background_command));
-    filled_cmd.envs(envs);
-
-    let pid_file_to_check = match initial_pid_file {
-        InitialPidFile::Create(path) => {
-            pre_exec_create_pidfile(filled_cmd, path);
-            path
-        }
-        InitialPidFile::Expect(path) => path,
-    };
-
-    let mut spawned_process = filled_cmd.spawn().with_context(|| {
-        format!("Could not spawn {process_name}, see console output and log files for details.")
-    })?;
-    let pid = spawned_process.id();
-    let pid = Pid::from_raw(
-        i32::try_from(pid)
-            .with_context(|| format!("Subprocess {process_name} has invalid pid {pid}"))?,
-    );
-
-    for retries in 0..RETRIES {
-        match process_started(pid, Some(pid_file_to_check), &process_status_check) {
-            Ok(true) => {
-                println!("\n{process_name} started, pid: {pid}");
-                return Ok(spawned_process);
-            }
-            Ok(false) => {
-                if retries == NOTICE_AFTER_RETRIES {
-                    // The process is taking a long time to start up. Keep waiting, but
-                    // print a message
-                    print!("\n{process_name} has not started yet, continuing to wait");
-                }
-                if retries % DOT_EVERY_RETRIES == 0 {
-                    print!(".");
-                    io::stdout().flush().unwrap();
-                }
-                thread::sleep(Duration::from_millis(RETRY_INTERVAL_MILLIS));
-            }
-            Err(e) => {
-                println!("{process_name} failed to start: {e:#}");
-                if let Err(e) = spawned_process.kill() {
-                    println!("Could not stop {process_name} subprocess: {e:#}")
-                };
-                return Err(e);
-            }
-        }
-    }
-    println!();
-    anyhow::bail!("{process_name} did not start in {RETRY_UNTIL_SECS} seconds");
-}
-
-/// Stops the process, using the pid file given. Returns Ok also if the process is already not running.
-pub fn stop_process(immediate: bool, process_name: &str, pid_file: &Path) -> anyhow::Result<()> {
-    let pid = match pid_file::read(pid_file)
-        .with_context(|| format!("read pid_file {pid_file:?}"))?
-    {
-        PidFileRead::NotExist => {
-            println!("{process_name} is already stopped: no pid file present at {pid_file:?}");
-            return Ok(());
-        }
-        PidFileRead::NotHeldByAnyProcess(_) => {
-            // Don't try to kill according to file contents beacuse the pid might have been re-used by another process.
-            // Don't delete the file either, it can race with new pid file creation.
-            // Read `pid_file` module comment for details.
-            println!(
-                "No process is holding the pidfile. The process must have already exited. Leave in place to avoid race conditions: {pid_file:?}"
-            );
-            return Ok(());
-        }
-        PidFileRead::LockedByOtherProcess(pid) => pid,
-    };
-    // XXX the pid could become invalid (and recycled) at any time before the kill() below.
-
-    // send signal
-    let sig = if immediate {
-        print!("Stopping {process_name} with pid {pid} immediately..");
-        Signal::SIGQUIT
-    } else {
-        print!("Stopping {process_name} with pid {pid} gracefully..");
-        Signal::SIGTERM
-    };
-    io::stdout().flush().unwrap();
-    match kill(pid, sig) {
-        Ok(()) => (),
-        Err(Errno::ESRCH) => {
-            // Again, don't delete the pid file. The unlink can race with a new pid file being created.
-            println!(
-                "{process_name} with pid {pid} does not exist, but a pid file {pid_file:?} was found. Likely the pid got recycled. Lucky we didn't harm anyone."
-            );
-            return Ok(());
-        }
-        Err(e) => anyhow::bail!("Failed to send signal to {process_name} with pid {pid}: {e}"),
-    }
-
-    // Wait until process is gone
-    for retries in 0..RETRIES {
-        match process_has_stopped(pid) {
-            Ok(true) => {
-                println!("\n{process_name} stopped");
-                return Ok(());
-            }
-            Ok(false) => {
-                if retries == NOTICE_AFTER_RETRIES {
-                    // The process is taking a long time to start up. Keep waiting, but
-                    // print a message
-                    print!("\n{process_name} has not stopped yet, continuing to wait");
-                }
-                if retries % DOT_EVERY_RETRIES == 0 {
-                    print!(".");
-                    io::stdout().flush().unwrap();
-                }
-                thread::sleep(Duration::from_millis(RETRY_INTERVAL_MILLIS));
-            }
-            Err(e) => {
-                println!("{process_name} with pid {pid} failed to stop: {e:#}");
|
|
||||||
return Err(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
println!();
|
|
||||||
anyhow::bail!("{process_name} with pid {pid} did not stop in {RETRY_UNTIL_SECS} seconds");
|
|
||||||
}
|
|
||||||
|
|
||||||
fn fill_rust_env_vars(cmd: &mut Command) -> &mut Command {
|
|
||||||
// If RUST_BACKTRACE is set, pass it through. But if it's not set, default
|
|
||||||
// to RUST_BACKTRACE=1.
|
|
||||||
let backtrace_setting = std::env::var_os("RUST_BACKTRACE");
|
|
||||||
let backtrace_setting = backtrace_setting
|
|
||||||
.as_deref()
|
|
||||||
.unwrap_or_else(|| OsStr::new("1"));
|
|
||||||
|
|
||||||
let mut filled_cmd = cmd.env_clear().env("RUST_BACKTRACE", backtrace_setting);
|
|
||||||
|
|
||||||
// Pass through these environment variables to the command
|
|
||||||
for var in ["LLVM_PROFILE_FILE", "FAILPOINTS", "RUST_LOG"] {
|
|
||||||
if let Some(val) = std::env::var_os(var) {
|
|
||||||
filled_cmd = filled_cmd.env(var, val);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
filled_cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
fn fill_aws_secrets_vars(mut cmd: &mut Command) -> &mut Command {
|
|
||||||
for env_key in [
|
|
||||||
"AWS_ACCESS_KEY_ID",
|
|
||||||
"AWS_SECRET_ACCESS_KEY",
|
|
||||||
"AWS_SESSION_TOKEN",
|
|
||||||
] {
|
|
||||||
if let Ok(value) = std::env::var(env_key) {
|
|
||||||
cmd = cmd.env(env_key, value);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a `pre_exec` to the cmd that, inbetween fork() and exec(),
|
|
||||||
/// 1. Claims a pidfile with a fcntl lock on it and
|
|
||||||
/// 2. Sets up the pidfile's file descriptor so that it (and the lock)
|
|
||||||
/// will remain held until the cmd exits.
|
|
||||||
fn pre_exec_create_pidfile<P>(cmd: &mut Command, path: P) -> &mut Command
|
|
||||||
where
|
|
||||||
P: Into<PathBuf>,
|
|
||||||
{
|
|
||||||
let path: PathBuf = path.into();
|
|
||||||
// SAFETY
|
|
||||||
// pre_exec is marked unsafe because it runs between fork and exec.
|
|
||||||
// Why is that dangerous in various ways?
|
|
||||||
// Long answer: https://github.com/rust-lang/rust/issues/39575
|
|
||||||
// Short answer: in a multi-threaded program, other threads may have
|
|
||||||
// been inside of critical sections at the time of fork. In the
|
|
||||||
// original process, that was allright, assuming they protected
|
|
||||||
// the critical sections appropriately, e.g., through locks.
|
|
||||||
// Fork adds another process to the mix that
|
|
||||||
// 1. Has a single thread T
|
|
||||||
// 2. In an exact copy of the address space at the time of fork.
|
|
||||||
// A variety of problems scan occur now:
|
|
||||||
// 1. T tries to grab a lock that was locked at the time of fork.
|
|
||||||
// It will wait forever since in its address space, the lock
|
|
||||||
// is in state 'taken' but the thread that would unlock it is
|
|
||||||
// not there.
|
|
||||||
// 2. A rust object that represented some external resource in the
|
|
||||||
// parent now got implicitly copied by the the fork, even though
|
|
||||||
// the object's type is not `Copy`. The parent program may use
|
|
||||||
// non-copyability as way to enforce unique ownership of an
|
|
||||||
// external resource in the typesystem. The fork breaks that
|
|
||||||
// assumption, as now both parent and child process have an
|
|
||||||
// owned instance of the object that represents the same
|
|
||||||
// underlying resource.
|
|
||||||
// While these seem like niche problems, (1) in particular is
|
|
||||||
// highly relevant. For example, `malloc()` may grab a mutex internally,
|
|
||||||
// and so, if we forked while another thread was mallocing' and our
|
|
||||||
// pre_exec closure allocates as well, it will block on the malloc
|
|
||||||
// mutex forever
|
|
||||||
//
|
|
||||||
// The proper solution is to only use C library functions that are marked
|
|
||||||
// "async-signal-safe": https://man7.org/linux/man-pages/man7/signal-safety.7.html
|
|
||||||
//
|
|
||||||
// With this specific pre_exec() closure, the non-error path doesn't allocate.
|
|
||||||
// The error path uses `anyhow`, and hence does allocate.
|
|
||||||
// We take our chances there, hoping that any potential disaster is constrained
|
|
||||||
// to the child process (e.g., malloc has no state ourside of the child process).
|
|
||||||
// Last, `expect` prints to stderr, and stdio is not async-signal-safe.
|
|
||||||
// Again, we take our chances, making the same assumptions as for malloc.
|
|
||||||
unsafe {
|
|
||||||
cmd.pre_exec(move || {
|
|
||||||
let file = pid_file::claim_for_current_process(&path).expect("claim pid file");
|
|
||||||
// Remove the FD_CLOEXEC flag on the pidfile descriptor so that the pidfile
|
|
||||||
// remains locked after exec.
|
|
||||||
nix::fcntl::fcntl(file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::empty()))
|
|
||||||
.expect("remove FD_CLOEXEC");
|
|
||||||
// Don't run drop(file), it would close the file before we actually exec.
|
|
||||||
std::mem::forget(file);
|
|
||||||
Ok(())
|
|
||||||
});
|
|
||||||
}
|
|
||||||
cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
fn process_started<F>(
|
|
||||||
pid: Pid,
|
|
||||||
pid_file_to_check: Option<&Path>,
|
|
||||||
status_check: &F,
|
|
||||||
) -> anyhow::Result<bool>
|
|
||||||
where
|
|
||||||
F: Fn() -> anyhow::Result<bool>,
|
|
||||||
{
|
|
||||||
match status_check() {
|
|
||||||
Ok(true) => match pid_file_to_check {
|
|
||||||
Some(pid_file_path) => match pid_file::read(pid_file_path)? {
|
|
||||||
PidFileRead::NotExist => Ok(false),
|
|
||||||
PidFileRead::LockedByOtherProcess(pid_in_file) => Ok(pid_in_file == pid),
|
|
||||||
PidFileRead::NotHeldByAnyProcess(_) => Ok(false),
|
|
||||||
},
|
|
||||||
None => Ok(true),
|
|
||||||
},
|
|
||||||
Ok(false) => Ok(false),
|
|
||||||
Err(e) => anyhow::bail!("process failed to start: {e}"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn process_has_stopped(pid: Pid) -> anyhow::Result<bool> {
|
|
||||||
match kill(pid, None) {
|
|
||||||
// Process exists, keep waiting
|
|
||||||
Ok(_) => Ok(false),
|
|
||||||
// Process not found, we're done
|
|
||||||
Err(Errno::ESRCH) => Ok(true),
|
|
||||||
Err(err) => anyhow::bail!("Failed to send signal to process with pid {pid}: {err}"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,48 +0,0 @@
|
|||||||
use anyhow::Context;
|
|
||||||
|
|
||||||
use std::path::PathBuf;
|
|
||||||
|
|
||||||
use crate::{background_process, local_env};
|
|
||||||
|
|
||||||
pub fn start_broker_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
|
||||||
let broker = &env.broker;
|
|
||||||
let listen_addr = &broker.listen_addr;
|
|
||||||
|
|
||||||
print!("Starting neon broker at {}", listen_addr);
|
|
||||||
|
|
||||||
let args = [format!("--listen-addr={listen_addr}")];
|
|
||||||
|
|
||||||
let client = reqwest::blocking::Client::new();
|
|
||||||
background_process::start_process(
|
|
||||||
"storage_broker",
|
|
||||||
&env.base_data_dir,
|
|
||||||
&env.storage_broker_bin(),
|
|
||||||
args,
|
|
||||||
[],
|
|
||||||
background_process::InitialPidFile::Create(&storage_broker_pid_file_path(env)),
|
|
||||||
|| {
|
|
||||||
let url = broker.client_url();
|
|
||||||
let status_url = url.join("status").with_context(|| {
|
|
||||||
format!("Failed to append /status path to broker endpoint {url}",)
|
|
||||||
})?;
|
|
||||||
let request = client
|
|
||||||
.get(status_url)
|
|
||||||
.build()
|
|
||||||
.with_context(|| format!("Failed to construct request to broker endpoint {url}"))?;
|
|
||||||
match client.execute(request) {
|
|
||||||
Ok(resp) => Ok(resp.status().is_success()),
|
|
||||||
Err(_) => Ok(false),
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.context("Failed to spawn storage_broker subprocess")?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn stop_broker_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
|
||||||
background_process::stop_process(true, "storage_broker", &storage_broker_pid_file_path(env))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn storage_broker_pid_file_path(env: &local_env::LocalEnv) -> PathBuf {
|
|
||||||
env.base_data_dir.join("storage_broker.pid")
|
|
||||||
}
|
|
||||||
@@ -12,14 +12,15 @@ use std::time::Duration;
|
|||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
use utils::{
|
use utils::{
|
||||||
|
connstring::connection_host_port,
|
||||||
id::{TenantId, TimelineId},
|
id::{TenantId, TimelineId},
|
||||||
lsn::Lsn,
|
lsn::Lsn,
|
||||||
postgres_backend::AuthType,
|
postgres_backend::AuthType,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::local_env::{LocalEnv, DEFAULT_PG_VERSION};
|
use crate::local_env::{LocalEnv, DEFAULT_PG_VERSION};
|
||||||
use crate::pageserver::PageServerNode;
|
|
||||||
use crate::postgresql_conf::PostgresConf;
|
use crate::postgresql_conf::PostgresConf;
|
||||||
|
use crate::storage::PageServerNode;
|
||||||
|
|
||||||
//
|
//
|
||||||
// ComputeControlPlane
|
// ComputeControlPlane
|
||||||
@@ -44,7 +45,7 @@ impl ComputeControlPlane {
|
|||||||
let mut nodes = BTreeMap::default();
|
let mut nodes = BTreeMap::default();
|
||||||
let pgdatadirspath = &env.pg_data_dirs_path();
|
let pgdatadirspath = &env.pg_data_dirs_path();
|
||||||
|
|
||||||
for tenant_dir in fs::read_dir(pgdatadirspath)
|
for tenant_dir in fs::read_dir(&pgdatadirspath)
|
||||||
.with_context(|| format!("failed to list {}", pgdatadirspath.display()))?
|
.with_context(|| format!("failed to list {}", pgdatadirspath.display()))?
|
||||||
{
|
{
|
||||||
let tenant_dir = tenant_dir?;
|
let tenant_dir = tenant_dir?;
|
||||||
@@ -67,8 +68,8 @@ impl ComputeControlPlane {
|
|||||||
fn get_port(&mut self) -> u16 {
|
fn get_port(&mut self) -> u16 {
|
||||||
1 + self
|
1 + self
|
||||||
.nodes
|
.nodes
|
||||||
.values()
|
.iter()
|
||||||
.map(|node| node.address.port())
|
.map(|(_name, node)| node.address.port())
|
||||||
.max()
|
.max()
|
||||||
.unwrap_or(self.base_port)
|
.unwrap_or(self.base_port)
|
||||||
}
|
}
|
||||||
@@ -182,18 +183,18 @@ impl PostgresNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn sync_safekeepers(&self, auth_token: &Option<String>, pg_version: u32) -> Result<Lsn> {
|
fn sync_safekeepers(&self, auth_token: &Option<String>, pg_version: u32) -> Result<Lsn> {
|
||||||
let pg_path = self.env.pg_bin_dir(pg_version)?.join("postgres");
|
let pg_path = self.env.pg_bin_dir(pg_version).join("postgres");
|
||||||
let mut cmd = Command::new(pg_path);
|
let mut cmd = Command::new(&pg_path);
|
||||||
|
|
||||||
cmd.arg("--sync-safekeepers")
|
cmd.arg("--sync-safekeepers")
|
||||||
.env_clear()
|
.env_clear()
|
||||||
.env(
|
.env(
|
||||||
"LD_LIBRARY_PATH",
|
"LD_LIBRARY_PATH",
|
||||||
self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
|
self.env.pg_lib_dir(pg_version).to_str().unwrap(),
|
||||||
)
|
)
|
||||||
.env(
|
.env(
|
||||||
"DYLD_LIBRARY_PATH",
|
"DYLD_LIBRARY_PATH",
|
||||||
self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
|
self.env.pg_lib_dir(pg_version).to_str().unwrap(),
|
||||||
)
|
)
|
||||||
.env("PGDATA", self.pgdata().to_str().unwrap())
|
.env("PGDATA", self.pgdata().to_str().unwrap())
|
||||||
.stdout(Stdio::piped())
|
.stdout(Stdio::piped())
|
||||||
@@ -201,7 +202,7 @@ impl PostgresNode {
|
|||||||
.stderr(Stdio::piped());
|
.stderr(Stdio::piped());
|
||||||
|
|
||||||
if let Some(token) = auth_token {
|
if let Some(token) = auth_token {
|
||||||
cmd.env("NEON_AUTH_TOKEN", token);
|
cmd.env("ZENITH_AUTH_TOKEN", token);
|
||||||
}
|
}
|
||||||
|
|
||||||
let sync_handle = cmd
|
let sync_handle = cmd
|
||||||
@@ -261,7 +262,7 @@ impl PostgresNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn create_pgdata(&self) -> Result<()> {
|
fn create_pgdata(&self) -> Result<()> {
|
||||||
fs::create_dir_all(self.pgdata()).with_context(|| {
|
fs::create_dir_all(&self.pgdata()).with_context(|| {
|
||||||
format!(
|
format!(
|
||||||
"could not create data directory {}",
|
"could not create data directory {}",
|
||||||
self.pgdata().display()
|
self.pgdata().display()
|
||||||
@@ -281,6 +282,8 @@ impl PostgresNode {
|
|||||||
fn setup_pg_conf(&self, auth_type: AuthType) -> Result<()> {
|
fn setup_pg_conf(&self, auth_type: AuthType) -> Result<()> {
|
||||||
let mut conf = PostgresConf::new();
|
let mut conf = PostgresConf::new();
|
||||||
conf.append("max_wal_senders", "10");
|
conf.append("max_wal_senders", "10");
|
||||||
|
// wal_log_hints is mandatory when running against pageserver (see gh issue#192)
|
||||||
|
// TODO: is it possible to check wal_log_hints at pageserver side via XLOG_PARAMETER_CHANGE?
|
||||||
conf.append("wal_log_hints", "off");
|
conf.append("wal_log_hints", "off");
|
||||||
conf.append("max_replication_slots", "10");
|
conf.append("max_replication_slots", "10");
|
||||||
conf.append("hot_standby", "on");
|
conf.append("hot_standby", "on");
|
||||||
@@ -299,22 +302,21 @@ impl PostgresNode {
|
|||||||
|
|
||||||
// Configure the node to fetch pages from pageserver
|
// Configure the node to fetch pages from pageserver
|
||||||
let pageserver_connstr = {
|
let pageserver_connstr = {
|
||||||
let config = &self.pageserver.pg_connection_config;
|
let (host, port) = connection_host_port(&self.pageserver.pg_connection_config);
|
||||||
let (host, port) = (config.host(), config.port());
|
|
||||||
|
|
||||||
// Set up authentication
|
// Set up authentication
|
||||||
//
|
//
|
||||||
// $NEON_AUTH_TOKEN will be replaced with value from environment
|
// $ZENITH_AUTH_TOKEN will be replaced with value from environment
|
||||||
// variable during compute pg startup. It is done this way because
|
// variable during compute pg startup. It is done this way because
|
||||||
// otherwise user will be able to retrieve the value using SHOW
|
// otherwise user will be able to retrieve the value using SHOW
|
||||||
// command or pg_settings
|
// command or pg_settings
|
||||||
let password = if let AuthType::NeonJWT = auth_type {
|
let password = if let AuthType::NeonJWT = auth_type {
|
||||||
"$NEON_AUTH_TOKEN"
|
"$ZENITH_AUTH_TOKEN"
|
||||||
} else {
|
} else {
|
||||||
""
|
""
|
||||||
};
|
};
|
||||||
// NOTE avoiding spaces in connection string, because it is less error prone if we forward it somewhere.
|
// NOTE avoiding spaces in connection string, because it is less error prone if we forward it somewhere.
|
||||||
// Also note that not all parameters are supported here. Because in compute we substitute $NEON_AUTH_TOKEN
|
// Also note that not all parameters are supported here. Because in compute we substitute $ZENITH_AUTH_TOKEN
|
||||||
// We parse this string and build it back with token from env var, and for simplicity rebuild
|
// We parse this string and build it back with token from env var, and for simplicity rebuild
|
||||||
// uses only needed variables namely host, port, user, password.
|
// uses only needed variables namely host, port, user, password.
|
||||||
format!("postgresql://no_user:{password}@{host}:{port}")
|
format!("postgresql://no_user:{password}@{host}:{port}")
|
||||||
@@ -322,9 +324,6 @@ impl PostgresNode {
|
|||||||
conf.append("shared_preload_libraries", "neon");
|
conf.append("shared_preload_libraries", "neon");
|
||||||
conf.append_line("");
|
conf.append_line("");
|
||||||
conf.append("neon.pageserver_connstring", &pageserver_connstr);
|
conf.append("neon.pageserver_connstring", &pageserver_connstr);
|
||||||
if let AuthType::NeonJWT = auth_type {
|
|
||||||
conf.append("neon.safekeeper_token_env", "$NEON_AUTH_TOKEN");
|
|
||||||
}
|
|
||||||
conf.append("neon.tenant_id", &self.tenant_id.to_string());
|
conf.append("neon.tenant_id", &self.tenant_id.to_string());
|
||||||
conf.append("neon.timeline_id", &self.timeline_id.to_string());
|
conf.append("neon.timeline_id", &self.timeline_id.to_string());
|
||||||
if let Some(lsn) = self.lsn {
|
if let Some(lsn) = self.lsn {
|
||||||
@@ -346,7 +345,7 @@ impl PostgresNode {
|
|||||||
// To be able to restore database in case of pageserver node crash, safekeeper should not
|
// To be able to restore database in case of pageserver node crash, safekeeper should not
|
||||||
// remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers
|
// remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers
|
||||||
// (if they are not able to upload WAL to S3).
|
// (if they are not able to upload WAL to S3).
|
||||||
conf.append("max_replication_write_lag", "15MB");
|
conf.append("max_replication_write_lag", "500MB");
|
||||||
conf.append("max_replication_flush_lag", "10GB");
|
conf.append("max_replication_flush_lag", "10GB");
|
||||||
|
|
||||||
if !self.env.safekeepers.is_empty() {
|
if !self.env.safekeepers.is_empty() {
|
||||||
@@ -423,7 +422,7 @@ impl PostgresNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
|
fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
|
||||||
let pg_ctl_path = self.env.pg_bin_dir(self.pg_version)?.join("pg_ctl");
|
let pg_ctl_path = self.env.pg_bin_dir(self.pg_version).join("pg_ctl");
|
||||||
let mut cmd = Command::new(pg_ctl_path);
|
let mut cmd = Command::new(pg_ctl_path);
|
||||||
cmd.args(
|
cmd.args(
|
||||||
[
|
[
|
||||||
@@ -441,14 +440,14 @@ impl PostgresNode {
|
|||||||
.env_clear()
|
.env_clear()
|
||||||
.env(
|
.env(
|
||||||
"LD_LIBRARY_PATH",
|
"LD_LIBRARY_PATH",
|
||||||
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
self.env.pg_lib_dir(self.pg_version).to_str().unwrap(),
|
||||||
)
|
)
|
||||||
.env(
|
.env(
|
||||||
"DYLD_LIBRARY_PATH",
|
"DYLD_LIBRARY_PATH",
|
||||||
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
self.env.pg_lib_dir(self.pg_version).to_str().unwrap(),
|
||||||
);
|
);
|
||||||
if let Some(token) = auth_token {
|
if let Some(token) = auth_token {
|
||||||
cmd.env("NEON_AUTH_TOKEN", token);
|
cmd.env("ZENITH_AUTH_TOKEN", token);
|
||||||
}
|
}
|
||||||
|
|
||||||
let pg_ctl = cmd.output().context("pg_ctl failed")?;
|
let pg_ctl = cmd.output().context("pg_ctl failed")?;
|
||||||
@@ -478,7 +477,7 @@ impl PostgresNode {
|
|||||||
postgresql_conf_path.to_str().unwrap()
|
postgresql_conf_path.to_str().unwrap()
|
||||||
)
|
)
|
||||||
})?;
|
})?;
|
||||||
fs::remove_dir_all(self.pgdata())?;
|
fs::remove_dir_all(&self.pgdata())?;
|
||||||
self.create_pgdata()?;
|
self.create_pgdata()?;
|
||||||
|
|
||||||
// 2. Bring back config files
|
// 2. Bring back config files
|
||||||
@@ -514,7 +513,7 @@ impl PostgresNode {
|
|||||||
"Destroying postgres data directory '{}'",
|
"Destroying postgres data directory '{}'",
|
||||||
self.pgdata().to_str().unwrap()
|
self.pgdata().to_str().unwrap()
|
||||||
);
|
);
|
||||||
fs::remove_dir_all(self.pgdata())?;
|
fs::remove_dir_all(&self.pgdata())?;
|
||||||
} else {
|
} else {
|
||||||
self.pg_ctl(&["stop"], &None)?;
|
self.pg_ctl(&["stop"], &None)?;
|
||||||
}
|
}
|
||||||
|
|||||||
97
control_plane/src/etcd.rs
Normal file
97
control_plane/src/etcd.rs
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
use std::{
|
||||||
|
fs,
|
||||||
|
path::PathBuf,
|
||||||
|
process::{Command, Stdio},
|
||||||
|
};
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use nix::{
|
||||||
|
sys::signal::{kill, Signal},
|
||||||
|
unistd::Pid,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{local_env, read_pidfile};
|
||||||
|
|
||||||
|
pub fn start_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
||||||
|
let etcd_broker = &env.etcd_broker;
|
||||||
|
println!(
|
||||||
|
"Starting etcd broker using {}",
|
||||||
|
etcd_broker.etcd_binary_path.display()
|
||||||
|
);
|
||||||
|
|
||||||
|
let etcd_data_dir = env.base_data_dir.join("etcd");
|
||||||
|
fs::create_dir_all(&etcd_data_dir).with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to create etcd data dir: {}",
|
||||||
|
etcd_data_dir.display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let etcd_stdout_file =
|
||||||
|
fs::File::create(etcd_data_dir.join("etcd.stdout.log")).with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to create etcd stout file in directory {}",
|
||||||
|
etcd_data_dir.display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
let etcd_stderr_file =
|
||||||
|
fs::File::create(etcd_data_dir.join("etcd.stderr.log")).with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to create etcd stderr file in directory {}",
|
||||||
|
etcd_data_dir.display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
let client_urls = etcd_broker.comma_separated_endpoints();
|
||||||
|
|
||||||
|
let etcd_process = Command::new(&etcd_broker.etcd_binary_path)
|
||||||
|
.args(&[
|
||||||
|
format!("--data-dir={}", etcd_data_dir.display()),
|
||||||
|
format!("--listen-client-urls={client_urls}"),
|
||||||
|
format!("--advertise-client-urls={client_urls}"),
|
||||||
|
// Set --quota-backend-bytes to keep the etcd virtual memory
|
||||||
|
// size smaller. Our test etcd clusters are very small.
|
||||||
|
// See https://github.com/etcd-io/etcd/issues/7910
|
||||||
|
"--quota-backend-bytes=100000000".to_string(),
|
||||||
|
])
|
||||||
|
.stdout(Stdio::from(etcd_stdout_file))
|
||||||
|
.stderr(Stdio::from(etcd_stderr_file))
|
||||||
|
.spawn()
|
||||||
|
.context("Failed to spawn etcd subprocess")?;
|
||||||
|
let pid = etcd_process.id();
|
||||||
|
|
||||||
|
let etcd_pid_file_path = etcd_pid_file_path(env);
|
||||||
|
fs::write(&etcd_pid_file_path, pid.to_string()).with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to create etcd pid file at {}",
|
||||||
|
etcd_pid_file_path.display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn stop_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
||||||
|
let etcd_path = &env.etcd_broker.etcd_binary_path;
|
||||||
|
println!("Stopping etcd broker at {}", etcd_path.display());
|
||||||
|
|
||||||
|
let etcd_pid_file_path = etcd_pid_file_path(env);
|
||||||
|
let pid = Pid::from_raw(read_pidfile(&etcd_pid_file_path).with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to read etcd pid file at {}",
|
||||||
|
etcd_pid_file_path.display()
|
||||||
|
)
|
||||||
|
})?);
|
||||||
|
|
||||||
|
kill(pid, Signal::SIGTERM).with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to stop etcd with pid {pid} at {}",
|
||||||
|
etcd_pid_file_path.display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn etcd_pid_file_path(env: &local_env::LocalEnv) -> PathBuf {
|
||||||
|
env.base_data_dir.join("etcd.pid")
|
||||||
|
}
|
||||||
@@ -6,11 +6,59 @@
|
|||||||
// Intended to be used in integration tests and in CLI tools for
|
// Intended to be used in integration tests and in CLI tools for
|
||||||
// local installations.
|
// local installations.
|
||||||
//
|
//
|
||||||
|
use anyhow::{anyhow, bail, Context, Result};
|
||||||
|
use std::fs;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::process::Command;
|
||||||
|
|
||||||
mod background_process;
|
|
||||||
pub mod broker;
|
|
||||||
pub mod compute;
|
pub mod compute;
|
||||||
|
pub mod etcd;
|
||||||
pub mod local_env;
|
pub mod local_env;
|
||||||
pub mod pageserver;
|
|
||||||
pub mod postgresql_conf;
|
pub mod postgresql_conf;
|
||||||
pub mod safekeeper;
|
pub mod safekeeper;
|
||||||
|
pub mod storage;
|
||||||
|
|
||||||
|
/// Read a PID file
|
||||||
|
///
|
||||||
|
/// We expect a file that contains a single integer.
|
||||||
|
/// We return an i32 for compatibility with libc and nix.
|
||||||
|
pub fn read_pidfile(pidfile: &Path) -> Result<i32> {
|
||||||
|
let pid_str = fs::read_to_string(pidfile)
|
||||||
|
.with_context(|| format!("failed to read pidfile {:?}", pidfile))?;
|
||||||
|
let pid: i32 = pid_str
|
||||||
|
.parse()
|
||||||
|
.map_err(|_| anyhow!("failed to parse pidfile {:?}", pidfile))?;
|
||||||
|
if pid < 1 {
|
||||||
|
bail!("pidfile {:?} contained bad value '{}'", pidfile, pid);
|
||||||
|
}
|
||||||
|
Ok(pid)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fill_rust_env_vars(cmd: &mut Command) -> &mut Command {
|
||||||
|
let cmd = cmd.env_clear().env("RUST_BACKTRACE", "1");
|
||||||
|
|
||||||
|
let var = "LLVM_PROFILE_FILE";
|
||||||
|
if let Some(val) = std::env::var_os(var) {
|
||||||
|
cmd.env(var, val);
|
||||||
|
}
|
||||||
|
|
||||||
|
const RUST_LOG_KEY: &str = "RUST_LOG";
|
||||||
|
if let Ok(rust_log_value) = std::env::var(RUST_LOG_KEY) {
|
||||||
|
cmd.env(RUST_LOG_KEY, rust_log_value)
|
||||||
|
} else {
|
||||||
|
cmd
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fill_aws_secrets_vars(mut cmd: &mut Command) -> &mut Command {
|
||||||
|
for env_key in [
|
||||||
|
"AWS_ACCESS_KEY_ID",
|
||||||
|
"AWS_SECRET_ACCESS_KEY",
|
||||||
|
"AWS_SESSION_TOKEN",
|
||||||
|
] {
|
||||||
|
if let Ok(value) = std::env::var(env_key) {
|
||||||
|
cmd = cmd.env(env_key, value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cmd
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,16 +4,12 @@
|
|||||||
//! script which will use local paths.
|
//! script which will use local paths.
|
||||||
|
|
||||||
use anyhow::{bail, ensure, Context};
|
use anyhow::{bail, ensure, Context};
|
||||||
|
|
||||||
use reqwest::Url;
|
use reqwest::Url;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use serde_with::{serde_as, DisplayFromStr};
|
use serde_with::{serde_as, DisplayFromStr};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::env;
|
use std::env;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::net::IpAddr;
|
|
||||||
use std::net::Ipv4Addr;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::process::{Command, Stdio};
|
use std::process::{Command, Stdio};
|
||||||
use utils::{
|
use utils::{
|
||||||
@@ -66,7 +62,7 @@ pub struct LocalEnv {
|
|||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub private_key_path: PathBuf,
|
pub private_key_path: PathBuf,
|
||||||
|
|
||||||
pub broker: NeonBroker,
|
pub etcd_broker: EtcdBroker,
|
||||||
|
|
||||||
pub pageserver: PageServerConf,
|
pub pageserver: PageServerConf,
|
||||||
|
|
||||||
@@ -82,26 +78,67 @@ pub struct LocalEnv {
|
|||||||
branch_name_mappings: HashMap<String, Vec<(TenantId, TimelineId)>>,
|
branch_name_mappings: HashMap<String, Vec<(TenantId, TimelineId)>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Broker config for cluster internal communication.
|
/// Etcd broker config for cluster internal communication.
|
||||||
|
#[serde_as]
|
||||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||||
#[serde(default)]
|
pub struct EtcdBroker {
|
||||||
pub struct NeonBroker {
|
/// A prefix to all to any key when pushing/polling etcd from a node.
|
||||||
/// Broker listen address for storage nodes coordination, e.g. '127.0.0.1:50051'.
|
#[serde(default)]
|
||||||
pub listen_addr: SocketAddr,
|
pub broker_etcd_prefix: Option<String>,
|
||||||
|
|
||||||
|
/// Broker (etcd) endpoints for storage nodes coordination, e.g. 'http://127.0.0.1:2379'.
|
||||||
|
#[serde(default)]
|
||||||
|
#[serde_as(as = "Vec<DisplayFromStr>")]
|
||||||
|
pub broker_endpoints: Vec<Url>,
|
||||||
|
|
||||||
|
/// Etcd binary path to use.
|
||||||
|
#[serde(default)]
|
||||||
|
pub etcd_binary_path: PathBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Dummy Default impl to satisfy Deserialize derive.
|
impl EtcdBroker {
|
||||||
impl Default for NeonBroker {
|
pub fn locate_etcd() -> anyhow::Result<PathBuf> {
|
||||||
fn default() -> Self {
|
let which_output = Command::new("which")
|
||||||
NeonBroker {
|
.arg("etcd")
|
||||||
listen_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0),
|
.output()
|
||||||
}
|
.context("Failed to run 'which etcd' command")?;
|
||||||
|
let stdout = String::from_utf8_lossy(&which_output.stdout);
|
||||||
|
ensure!(
|
||||||
|
which_output.status.success(),
|
||||||
|
"'which etcd' invocation failed. Status: {}, stdout: {stdout}, stderr: {}",
|
||||||
|
which_output.status,
|
||||||
|
String::from_utf8_lossy(&which_output.stderr)
|
||||||
|
);
|
||||||
|
|
||||||
|
let etcd_path = PathBuf::from(stdout.trim());
|
||||||
|
ensure!(
|
||||||
|
etcd_path.is_file(),
|
||||||
|
"'which etcd' invocation was successful, but the path it returned is not a file or does not exist: {}",
|
||||||
|
etcd_path.display()
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(etcd_path)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
impl NeonBroker {
|
pub fn comma_separated_endpoints(&self) -> String {
|
||||||
pub fn client_url(&self) -> Url {
|
self.broker_endpoints
|
||||||
Url::parse(&format!("http://{}", self.listen_addr)).expect("failed to construct url")
|
.iter()
|
||||||
|
.map(|url| {
|
||||||
|
// URL by default adds a '/' path at the end, which is not what etcd CLI wants.
|
||||||
|
let url_string = url.as_str();
|
||||||
|
if url_string.ends_with('/') {
|
||||||
|
&url_string[0..url_string.len() - 1]
|
||||||
|
} else {
|
||||||
|
url_string
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.fold(String::new(), |mut comma_separated_urls, url| {
|
||||||
|
if !comma_separated_urls.is_empty() {
|
||||||
|
comma_separated_urls.push(',');
|
||||||
|
}
|
||||||
|
comma_separated_urls.push_str(url);
|
||||||
|
comma_separated_urls
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -164,41 +201,37 @@ impl LocalEnv {
|
|||||||
self.pg_distrib_dir.clone()
|
self.pg_distrib_dir.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
|
pub fn pg_distrib_dir(&self, pg_version: u32) -> PathBuf {
|
||||||
let path = self.pg_distrib_dir.clone();
|
let path = self.pg_distrib_dir.clone();
|
||||||
|
|
||||||
match pg_version {
|
match pg_version {
|
||||||
14 => Ok(path.join(format!("v{pg_version}"))),
|
14 => path.join(format!("v{pg_version}")),
|
||||||
15 => Ok(path.join(format!("v{pg_version}"))),
|
15 => path.join(format!("v{pg_version}")),
|
||||||
_ => bail!("Unsupported postgres version: {}", pg_version),
|
_ => panic!("Unsupported postgres version: {}", pg_version),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
|
pub fn pg_bin_dir(&self, pg_version: u32) -> PathBuf {
|
||||||
match pg_version {
|
match pg_version {
|
||||||
14 => Ok(self.pg_distrib_dir(pg_version)?.join("bin")),
|
14 => self.pg_distrib_dir(pg_version).join("bin"),
|
||||||
15 => Ok(self.pg_distrib_dir(pg_version)?.join("bin")),
|
15 => self.pg_distrib_dir(pg_version).join("bin"),
|
||||||
_ => bail!("Unsupported postgres version: {}", pg_version),
|
_ => panic!("Unsupported postgres version: {}", pg_version),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
|
pub fn pg_lib_dir(&self, pg_version: u32) -> PathBuf {
|
||||||
match pg_version {
|
match pg_version {
|
||||||
14 => Ok(self.pg_distrib_dir(pg_version)?.join("lib")),
|
14 => self.pg_distrib_dir(pg_version).join("lib"),
|
||||||
15 => Ok(self.pg_distrib_dir(pg_version)?.join("lib")),
|
15 => self.pg_distrib_dir(pg_version).join("lib"),
|
||||||
_ => bail!("Unsupported postgres version: {}", pg_version),
|
_ => panic!("Unsupported postgres version: {}", pg_version),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn pageserver_bin(&self) -> PathBuf {
|
pub fn pageserver_bin(&self) -> anyhow::Result<PathBuf> {
|
||||||
self.neon_distrib_dir.join("pageserver")
|
Ok(self.neon_distrib_dir.join("pageserver"))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn safekeeper_bin(&self) -> PathBuf {
|
pub fn safekeeper_bin(&self) -> anyhow::Result<PathBuf> {
|
||||||
self.neon_distrib_dir.join("safekeeper")
|
Ok(self.neon_distrib_dir.join("safekeeper"))
|
||||||
}
|
|
||||||
|
|
||||||
pub fn storage_broker_bin(&self) -> PathBuf {
|
|
||||||
self.neon_distrib_dir.join("storage_broker")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn pg_data_dirs_path(&self) -> PathBuf {
|
pub fn pg_data_dirs_path(&self) -> PathBuf {
|
||||||
@@ -296,6 +329,11 @@ impl LocalEnv {
|
|||||||
env.neon_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
|
env.neon_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If no initial tenant ID was given, generate it.
|
||||||
|
if env.default_tenant_id.is_none() {
|
||||||
|
env.default_tenant_id = Some(TenantId::generate());
|
||||||
|
}
|
||||||
|
|
||||||
env.base_data_dir = base_path();
|
env.base_data_dir = base_path();
|
||||||
|
|
||||||
Ok(env)
|
Ok(env)
|
||||||
@@ -384,10 +422,10 @@ impl LocalEnv {
|
|||||||
"directory '{}' already exists. Perhaps already initialized?",
|
"directory '{}' already exists. Perhaps already initialized?",
|
||||||
base_path.display()
|
base_path.display()
|
||||||
);
|
);
|
||||||
if !self.pg_bin_dir(pg_version)?.join("postgres").exists() {
|
if !self.pg_bin_dir(pg_version).join("postgres").exists() {
|
||||||
bail!(
|
bail!(
|
||||||
"Can't find postgres binary at {}",
|
"Can't find postgres binary at {}",
|
||||||
self.pg_bin_dir(pg_version)?.display()
|
self.pg_bin_dir(pg_version).display()
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
for binary in ["pageserver", "safekeeper"] {
|
for binary in ["pageserver", "safekeeper"] {
|
||||||
@@ -399,7 +437,7 @@ impl LocalEnv {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fs::create_dir(base_path)?;
|
fs::create_dir(&base_path)?;
|
||||||
|
|
||||||
// generate keys for jwt
|
// generate keys for jwt
|
||||||
// openssl genrsa -out private_key.pem 2048
|
// openssl genrsa -out private_key.pem 2048
|
||||||
@@ -408,7 +446,7 @@ impl LocalEnv {
|
|||||||
private_key_path = base_path.join("auth_private_key.pem");
|
private_key_path = base_path.join("auth_private_key.pem");
|
||||||
let keygen_output = Command::new("openssl")
|
let keygen_output = Command::new("openssl")
|
||||||
.arg("genrsa")
|
.arg("genrsa")
|
||||||
.args(["-out", private_key_path.to_str().unwrap()])
|
.args(&["-out", private_key_path.to_str().unwrap()])
|
||||||
.arg("2048")
|
.arg("2048")
|
||||||
.stdout(Stdio::null())
|
.stdout(Stdio::null())
|
||||||
.output()
|
.output()
|
||||||
@@ -425,10 +463,10 @@ impl LocalEnv {
|
|||||||
// openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
|
// openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
|
||||||
let keygen_output = Command::new("openssl")
|
let keygen_output = Command::new("openssl")
|
||||||
.arg("rsa")
|
.arg("rsa")
|
||||||
.args(["-in", private_key_path.to_str().unwrap()])
|
.args(&["-in", private_key_path.to_str().unwrap()])
|
||||||
.arg("-pubout")
|
.arg("-pubout")
|
||||||
.args(["-outform", "PEM"])
|
.args(&["-outform", "PEM"])
|
||||||
.args(["-out", public_key_path.to_str().unwrap()])
|
.args(&["-out", public_key_path.to_str().unwrap()])
|
||||||
.stdout(Stdio::null())
|
.stdout(Stdio::null())
|
||||||
.output()
|
.output()
|
||||||
.context("failed to generate auth private key")?;
|
.context("failed to generate auth private key")?;
|
||||||
@@ -473,8 +511,8 @@ mod tests {
|
|||||||
"failed to parse simple config {simple_conf_toml}, reason: {simple_conf_parse_result:?}"
|
"failed to parse simple config {simple_conf_toml}, reason: {simple_conf_parse_result:?}"
|
||||||
);
|
);
|
||||||
|
|
||||||
let string_to_replace = "listen_addr = '127.0.0.1:50051'";
|
let string_to_replace = "broker_endpoints = ['http://127.0.0.1:2379']";
|
||||||
let spoiled_url_str = "listen_addr = '!@$XOXO%^&'";
|
let spoiled_url_str = "broker_endpoints = ['!@$XOXO%^&']";
|
||||||
let spoiled_url_toml = simple_conf_toml.replace(string_to_replace, spoiled_url_str);
|
let spoiled_url_toml = simple_conf_toml.replace(string_to_replace, spoiled_url_str);
|
||||||
assert!(
|
assert!(
|
||||||
spoiled_url_toml.contains(spoiled_url_str),
|
spoiled_url_toml.contains(spoiled_url_str),
|
||||||
|
|||||||
@@ -1,22 +1,29 @@
|
|||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::process::Child;
|
use std::process::Command;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::{io, result};
|
use std::time::Duration;
|
||||||
|
use std::{io, result, thread};
|
||||||
|
|
||||||
use anyhow::Context;
|
use anyhow::bail;
|
||||||
use postgres_connection::PgConnectionConfig;
|
use nix::errno::Errno;
|
||||||
|
use nix::sys::signal::{kill, Signal};
|
||||||
|
use nix::unistd::Pid;
|
||||||
|
use postgres::Config;
|
||||||
use reqwest::blocking::{Client, RequestBuilder, Response};
|
use reqwest::blocking::{Client, RequestBuilder, Response};
|
||||||
use reqwest::{IntoUrl, Method};
|
use reqwest::{IntoUrl, Method};
|
||||||
|
use safekeeper::http::models::TimelineCreateRequest;
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
use utils::{http::error::HttpErrorBody, id::NodeId};
|
use utils::{
|
||||||
|
connstring::connection_address,
|
||||||
use crate::pageserver::PageServerNode;
|
http::error::HttpErrorBody,
|
||||||
use crate::{
|
id::{NodeId, TenantId, TimelineId},
|
||||||
background_process,
|
|
||||||
local_env::{LocalEnv, SafekeeperConf},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use crate::local_env::{LocalEnv, SafekeeperConf};
|
||||||
|
use crate::storage::PageServerNode;
|
||||||
|
use crate::{fill_aws_secrets_vars, fill_rust_env_vars, read_pidfile};
|
||||||
|
|
||||||
#[derive(Error, Debug)]
|
#[derive(Error, Debug)]
|
||||||
pub enum SafekeeperHttpError {
|
pub enum SafekeeperHttpError {
|
||||||
#[error("Reqwest error: {0}")]
|
#[error("Reqwest error: {0}")]
|
||||||
@@ -61,7 +68,7 @@ pub struct SafekeeperNode {
|
|||||||
|
|
||||||
pub conf: SafekeeperConf,
|
pub conf: SafekeeperConf,
|
||||||
|
|
||||||
pub pg_connection_config: PgConnectionConfig,
|
pub pg_connection_config: Config,
|
||||||
pub env: LocalEnv,
|
pub env: LocalEnv,
|
||||||
pub http_client: Client,
|
pub http_client: Client,
|
||||||
pub http_base_url: String,
|
pub http_base_url: String,
|
||||||
@@ -85,12 +92,15 @@ impl SafekeeperNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Construct libpq connection string for connecting to this safekeeper.
|
/// Construct libpq connection string for connecting to this safekeeper.
|
||||||
fn safekeeper_connection_config(port: u16) -> PgConnectionConfig {
|
fn safekeeper_connection_config(port: u16) -> Config {
|
||||||
PgConnectionConfig::new_host_port(url::Host::parse("127.0.0.1").unwrap(), port)
|
// TODO safekeeper authentication not implemented yet
|
||||||
|
format!("postgresql://no_user@127.0.0.1:{}/no_db", port)
|
||||||
|
.parse()
|
||||||
|
.unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
|
pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
|
||||||
env.safekeeper_data_dir(&format!("sk{sk_id}"))
|
env.safekeeper_data_dir(format!("sk{}", sk_id).as_ref())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn datadir_path(&self) -> PathBuf {
|
pub fn datadir_path(&self) -> PathBuf {
|
||||||
@@ -101,74 +111,92 @@ impl SafekeeperNode {
|
|||||||
self.datadir_path().join("safekeeper.pid")
|
self.datadir_path().join("safekeeper.pid")
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn start(&self) -> anyhow::Result<Child> {
|
pub fn start(&self) -> anyhow::Result<()> {
|
||||||
print!(
|
print!(
|
||||||
"Starting safekeeper at '{}' in '{}'",
|
"Starting safekeeper at '{}' in '{}'",
|
||||||
self.pg_connection_config.raw_address(),
|
connection_address(&self.pg_connection_config),
|
||||||
self.datadir_path().display()
|
self.datadir_path().display()
|
||||||
);
|
);
|
||||||
io::stdout().flush().unwrap();
|
io::stdout().flush().unwrap();
|
||||||
|
|
||||||
let listen_pg = format!("127.0.0.1:{}", self.conf.pg_port);
|
let listen_pg = format!("127.0.0.1:{}", self.conf.pg_port);
|
||||||
let listen_http = format!("127.0.0.1:{}", self.conf.http_port);
|
let listen_http = format!("127.0.0.1:{}", self.conf.http_port);
|
||||||
let id = self.id;
|
|
||||||
let datadir = self.datadir_path();
|
|
||||||
|
|
||||||
let id_string = id.to_string();
|
let mut cmd = Command::new(self.env.safekeeper_bin()?);
|
||||||
let mut args = vec![
|
fill_rust_env_vars(
|
||||||
"-D",
|
cmd.args(&["-D", self.datadir_path().to_str().unwrap()])
|
||||||
datadir.to_str().with_context(|| {
|
.args(&["--id", self.id.to_string().as_ref()])
|
||||||
format!("Datadir path {datadir:?} cannot be represented as a unicode string")
|
.args(&["--listen-pg", &listen_pg])
|
||||||
})?,
|
.args(&["--listen-http", &listen_http])
|
||||||
"--id",
|
.args(&["--recall", "1 second"])
|
||||||
&id_string,
|
.arg("--daemonize"),
|
||||||
"--listen-pg",
|
);
|
||||||
&listen_pg,
|
|
||||||
"--listen-http",
|
|
||||||
&listen_http,
|
|
||||||
];
|
|
||||||
if !self.conf.sync {
|
if !self.conf.sync {
|
||||||
args.push("--no-sync");
|
cmd.arg("--no-sync");
|
||||||
}
|
}
|
||||||
|
|
||||||
let broker_endpoint = format!("{}", self.env.broker.client_url());
|
let comma_separated_endpoints = self.env.etcd_broker.comma_separated_endpoints();
|
||||||
args.extend(["--broker-endpoint", &broker_endpoint]);
|
if !comma_separated_endpoints.is_empty() {
|
||||||
|
cmd.args(&["--broker-endpoints", &comma_separated_endpoints]);
|
||||||
let mut backup_threads = String::new();
|
}
|
||||||
|
if let Some(prefix) = self.env.etcd_broker.broker_etcd_prefix.as_deref() {
|
||||||
|
cmd.args(&["--broker-etcd-prefix", prefix]);
|
||||||
|
}
|
||||||
if let Some(threads) = self.conf.backup_threads {
|
if let Some(threads) = self.conf.backup_threads {
|
||||||
backup_threads = threads.to_string();
|
cmd.args(&["--backup-threads", threads.to_string().as_ref()]);
|
||||||
args.extend(["--backup-threads", &backup_threads]);
|
|
||||||
} else {
|
|
||||||
drop(backup_threads);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(ref remote_storage) = self.conf.remote_storage {
|
if let Some(ref remote_storage) = self.conf.remote_storage {
|
||||||
args.extend(["--remote-storage", remote_storage]);
|
cmd.args(&["--remote-storage", remote_storage]);
|
||||||
}
|
}
|
||||||
|
|
||||||
let key_path = self.env.base_data_dir.join("auth_public_key.pem");
|
|
||||||
if self.conf.auth_enabled {
|
if self.conf.auth_enabled {
|
||||||
args.extend([
|
cmd.arg("--auth-validation-public-key-path");
|
||||||
"--auth-validation-public-key-path",
|
// PathBuf is better be passed as is, not via `String`.
|
||||||
key_path.to_str().with_context(|| {
|
cmd.arg(self.env.base_data_dir.join("auth_public_key.pem"));
|
||||||
format!("Key path {key_path:?} cannot be represented as a unicode string")
|
|
||||||
})?,
|
|
||||||
]);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
background_process::start_process(
|
fill_aws_secrets_vars(&mut cmd);
|
||||||
&format!("safekeeper {id}"),
|
|
||||||
&datadir,
|
if !cmd.status()?.success() {
|
||||||
&self.env.safekeeper_bin(),
|
bail!(
|
||||||
&args,
|
"Safekeeper failed to start. See '{}' for details.",
|
||||||
[],
|
self.datadir_path().join("safekeeper.log").display()
|
||||||
background_process::InitialPidFile::Expect(&self.pid_file()),
|
);
|
||||||
|| match self.check_status() {
|
}
|
||||||
Ok(()) => Ok(true),
|
|
||||||
Err(SafekeeperHttpError::Transport(_)) => Ok(false),
|
// It takes a while for the safekeeper to start up. Wait until it is
|
||||||
Err(e) => Err(anyhow::anyhow!("Failed to check node status: {e}")),
|
// open for business.
|
||||||
},
|
const RETRIES: i8 = 15;
|
||||||
)
|
for retries in 1..RETRIES {
|
||||||
|
match self.check_status() {
|
||||||
|
Ok(_) => {
|
||||||
|
println!("\nSafekeeper started");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
match err {
|
||||||
|
SafekeeperHttpError::Transport(err) => {
|
||||||
|
if err.is_connect() && retries < 5 {
|
||||||
|
print!(".");
|
||||||
|
io::stdout().flush().unwrap();
|
||||||
|
} else {
|
||||||
|
if retries == 5 {
|
||||||
|
println!() // put a line break after dots for second message
|
||||||
|
}
|
||||||
|
println!(
|
||||||
|
"Safekeeper not responding yet, err {} retrying ({})...",
|
||||||
|
err, retries
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
SafekeeperHttpError::Response(msg) => {
|
||||||
|
bail!("safekeeper failed to start: {} ", msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
thread::sleep(Duration::from_secs(1));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bail!("safekeeper failed to start in {} seconds", RETRIES);
|
||||||
}
|
}
|
||||||
|
|
||||||
///
|
///
|
||||||
@@ -180,11 +208,63 @@ impl SafekeeperNode {
|
|||||||
/// If the server is not running, returns success
|
/// If the server is not running, returns success
|
||||||
///
|
///
|
||||||
pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
|
pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
|
||||||
background_process::stop_process(
|
let pid_file = self.pid_file();
|
||||||
immediate,
|
if !pid_file.exists() {
|
||||||
&format!("safekeeper {}", self.id),
|
println!("Safekeeper {} is already stopped", self.id);
|
||||||
&self.pid_file(),
|
return Ok(());
|
||||||
)
|
}
|
||||||
|
let pid = read_pidfile(&pid_file)?;
|
||||||
|
let pid = Pid::from_raw(pid);
|
||||||
|
|
||||||
|
let sig = if immediate {
|
||||||
|
print!("Stopping safekeeper {} immediately..", self.id);
|
||||||
|
Signal::SIGQUIT
|
||||||
|
} else {
|
||||||
|
print!("Stopping safekeeper {} gracefully..", self.id);
|
||||||
|
Signal::SIGTERM
|
||||||
|
};
|
||||||
|
io::stdout().flush().unwrap();
|
||||||
|
match kill(pid, sig) {
|
||||||
|
Ok(_) => (),
|
||||||
|
Err(Errno::ESRCH) => {
|
||||||
|
println!(
|
||||||
|
"Safekeeper with pid {} does not exist, but a PID file was found",
|
||||||
|
pid
|
||||||
|
);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
Err(err) => bail!(
|
||||||
|
"Failed to send signal to safekeeper with pid {}: {}",
|
||||||
|
pid,
|
||||||
|
err.desc()
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait until process is gone
|
||||||
|
for i in 0..600 {
|
||||||
|
let signal = None; // Send no signal, just get the error code
|
||||||
|
match kill(pid, signal) {
|
||||||
|
Ok(_) => (), // Process exists, keep waiting
|
||||||
|
Err(Errno::ESRCH) => {
|
||||||
|
// Process not found, we're done
|
||||||
|
println!("done!");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
Err(err) => bail!(
|
||||||
|
"Failed to send signal to pageserver with pid {}: {}",
|
||||||
|
pid,
|
||||||
|
err.desc()
|
||||||
|
),
|
||||||
|
};
|
||||||
|
|
||||||
|
if i % 10 == 0 {
|
||||||
|
print!(".");
|
||||||
|
io::stdout().flush().unwrap();
|
||||||
|
}
|
||||||
|
thread::sleep(Duration::from_millis(100));
|
||||||
|
}
|
||||||
|
|
||||||
|
bail!("Failed to stop safekeeper with pid {}", pid);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
|
fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
|
||||||
@@ -201,4 +281,24 @@ impl SafekeeperNode {
|
|||||||
.error_from_body()?;
|
.error_from_body()?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn timeline_create(
|
||||||
|
&self,
|
||||||
|
tenant_id: TenantId,
|
||||||
|
timeline_id: TimelineId,
|
||||||
|
peer_ids: Vec<NodeId>,
|
||||||
|
) -> Result<()> {
|
||||||
|
Ok(self
|
||||||
|
.http_request(
|
||||||
|
Method::POST,
|
||||||
|
format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
|
||||||
|
)
|
||||||
|
.json(&TimelineCreateRequest {
|
||||||
|
timeline_id,
|
||||||
|
peer_ids,
|
||||||
|
})
|
||||||
|
.send()?
|
||||||
|
.error_from_body()?
|
||||||
|
.json()?)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,29 +1,33 @@
|
|||||||
use std::borrow::Cow;
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
use std::io::{BufReader, Write};
|
use std::io::{BufReader, Write};
|
||||||
use std::num::NonZeroU64;
|
use std::num::NonZeroU64;
|
||||||
use std::path::PathBuf;
|
use std::path::{Path, PathBuf};
|
||||||
use std::process::{Child, Command};
|
use std::process::Command;
|
||||||
use std::{io, result};
|
use std::time::Duration;
|
||||||
|
use std::{io, result, thread};
|
||||||
|
|
||||||
use anyhow::{bail, Context};
|
use anyhow::{bail, Context};
|
||||||
use pageserver_api::models::{
|
use nix::errno::Errno;
|
||||||
|
use nix::sys::signal::{kill, Signal};
|
||||||
|
use nix::unistd::Pid;
|
||||||
|
use pageserver::http::models::{
|
||||||
TenantConfigRequest, TenantCreateRequest, TenantInfo, TimelineCreateRequest, TimelineInfo,
|
TenantConfigRequest, TenantCreateRequest, TenantInfo, TimelineCreateRequest, TimelineInfo,
|
||||||
};
|
};
|
||||||
use postgres_connection::{parse_host_port, PgConnectionConfig};
|
use postgres::{Config, NoTls};
|
||||||
use reqwest::blocking::{Client, RequestBuilder, Response};
|
use reqwest::blocking::{Client, RequestBuilder, Response};
|
||||||
use reqwest::{IntoUrl, Method};
|
use reqwest::{IntoUrl, Method};
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
use utils::auth::{Claims, Scope};
|
|
||||||
use utils::{
|
use utils::{
|
||||||
|
connstring::connection_address,
|
||||||
http::error::HttpErrorBody,
|
http::error::HttpErrorBody,
|
||||||
id::{TenantId, TimelineId},
|
id::{TenantId, TimelineId},
|
||||||
lsn::Lsn,
|
lsn::Lsn,
|
||||||
postgres_backend::AuthType,
|
postgres_backend::AuthType,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{background_process, local_env::LocalEnv};
|
use crate::local_env::LocalEnv;
|
||||||
|
use crate::{fill_aws_secrets_vars, fill_rust_env_vars, read_pidfile};
|
||||||
|
|
||||||
#[derive(Error, Debug)]
|
#[derive(Error, Debug)]
|
||||||
pub enum PageserverHttpError {
|
pub enum PageserverHttpError {
|
||||||
@@ -71,7 +75,7 @@ impl ResponseErrorMessageExt for Response {
|
|||||||
//
|
//
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct PageServerNode {
|
pub struct PageServerNode {
|
||||||
pub pg_connection_config: PgConnectionConfig,
|
pub pg_connection_config: Config,
|
||||||
pub env: LocalEnv,
|
pub env: LocalEnv,
|
||||||
pub http_client: Client,
|
pub http_client: Client,
|
||||||
pub http_base_url: String,
|
pub http_base_url: String,
|
||||||
@@ -79,26 +83,37 @@ pub struct PageServerNode {
|
|||||||
|
|
||||||
impl PageServerNode {
|
impl PageServerNode {
|
||||||
pub fn from_env(env: &LocalEnv) -> PageServerNode {
|
pub fn from_env(env: &LocalEnv) -> PageServerNode {
|
||||||
let (host, port) = parse_host_port(&env.pageserver.listen_pg_addr)
|
|
||||||
.expect("Unable to parse listen_pg_addr");
|
|
||||||
let port = port.unwrap_or(5432);
|
|
||||||
let password = if env.pageserver.auth_type == AuthType::NeonJWT {
|
let password = if env.pageserver.auth_type == AuthType::NeonJWT {
|
||||||
Some(env.pageserver.auth_token.clone())
|
&env.pageserver.auth_token
|
||||||
} else {
|
} else {
|
||||||
None
|
""
|
||||||
};
|
};
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
pg_connection_config: PgConnectionConfig::new_host_port(host, port)
|
pg_connection_config: Self::pageserver_connection_config(
|
||||||
.set_password(password),
|
password,
|
||||||
|
&env.pageserver.listen_pg_addr,
|
||||||
|
),
|
||||||
env: env.clone(),
|
env: env.clone(),
|
||||||
http_client: Client::new(),
|
http_client: Client::new(),
|
||||||
http_base_url: format!("http://{}/v1", env.pageserver.listen_http_addr),
|
http_base_url: format!("http://{}/v1", env.pageserver.listen_http_addr),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// pageserver conf overrides defined by neon_local configuration.
|
/// Construct libpq connection string for connecting to the pageserver.
|
||||||
fn neon_local_overrides(&self) -> Vec<String> {
|
fn pageserver_connection_config(password: &str, listen_addr: &str) -> Config {
|
||||||
|
format!("postgresql://no_user:{password}@{listen_addr}/no_db")
|
||||||
|
.parse()
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn initialize(
|
||||||
|
&self,
|
||||||
|
create_tenant: Option<TenantId>,
|
||||||
|
initial_timeline_id: Option<TimelineId>,
|
||||||
|
config_overrides: &[&str],
|
||||||
|
pg_version: u32,
|
||||||
|
) -> anyhow::Result<TimelineId> {
|
||||||
let id = format!("id={}", self.env.pageserver.id);
|
let id = format!("id={}", self.env.pageserver.id);
|
||||||
// FIXME: the paths should be shell-escaped to handle paths with spaces, quotas etc.
|
// FIXME: the paths should be shell-escaped to handle paths with spaces, quotas etc.
|
||||||
let pg_distrib_dir_param = format!(
|
let pg_distrib_dir_param = format!(
|
||||||
@@ -113,149 +128,155 @@ impl PageServerNode {
         );
         let listen_pg_addr_param =
             format!("listen_pg_addr='{}'", self.env.pageserver.listen_pg_addr);
-        let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url());
+        let broker_endpoints_param = format!(
+            "broker_endpoints=[{}]",
+            self.env
+                .etcd_broker
+                .broker_endpoints
+                .iter()
+                .map(|url| format!("'{url}'"))
+                .collect::<Vec<_>>()
+                .join(",")
+        );
+        let broker_etcd_prefix_param = self
+            .env
+            .etcd_broker
+            .broker_etcd_prefix
+            .as_ref()
+            .map(|prefix| format!("broker_etcd_prefix='{prefix}'"));
 
-        let mut overrides = vec![
-            id,
-            pg_distrib_dir_param,
-            authg_type_param,
-            listen_http_addr_param,
-            listen_pg_addr_param,
-            broker_endpoint_param,
-        ];
+        let mut init_config_overrides = config_overrides.to_vec();
+        init_config_overrides.push(&id);
+        init_config_overrides.push(&pg_distrib_dir_param);
+        init_config_overrides.push(&authg_type_param);
+        init_config_overrides.push(&listen_http_addr_param);
+        init_config_overrides.push(&listen_pg_addr_param);
+        init_config_overrides.push(&broker_endpoints_param);
+        if let Some(broker_etcd_prefix_param) = broker_etcd_prefix_param.as_deref() {
+            init_config_overrides.push(broker_etcd_prefix_param);
+        }
 
         if self.env.pageserver.auth_type != AuthType::Trust {
-            overrides.push("auth_validation_public_key_path='auth_public_key.pem'".to_owned());
+            init_config_overrides.push("auth_validation_public_key_path='auth_public_key.pem'");
         }
-        overrides
+        self.start_node(&init_config_overrides, &self.env.base_data_dir, true)?;
+        let init_result = self
+            .try_init_timeline(create_tenant, initial_timeline_id, pg_version)
+            .context("Failed to create initial tenant and timeline for pageserver");
+        match &init_result {
+            Ok(initial_timeline_id) => {
+                println!("Successfully initialized timeline {initial_timeline_id}")
+            }
+            Err(e) => eprintln!("{e:#}"),
+        }
+        self.stop(false)?;
+        init_result
     }
 
-    /// Initializes a pageserver node by creating its config with the overrides provided.
-    pub fn initialize(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
-        // First, run `pageserver --init` and wait for it to write a config into FS and exit.
-        self.pageserver_init(config_overrides).with_context(|| {
-            format!(
-                "Failed to run init for pageserver node {}",
-                self.env.pageserver.id,
-            )
-        })
+    fn try_init_timeline(
+        &self,
+        new_tenant_id: Option<TenantId>,
+        new_timeline_id: Option<TimelineId>,
+        pg_version: u32,
+    ) -> anyhow::Result<TimelineId> {
+        let initial_tenant_id = self.tenant_create(new_tenant_id, HashMap::new())?;
+        let initial_timeline_info = self.timeline_create(
+            initial_tenant_id,
+            new_timeline_id,
+            None,
+            None,
+            Some(pg_version),
+        )?;
+        Ok(initial_timeline_info.timeline_id)
     }
 
     pub fn repo_path(&self) -> PathBuf {
         self.env.pageserver_data_dir()
     }
 
-    /// The pid file is created by the pageserver process, with its pid stored inside.
-    /// Other pageservers cannot lock the same file and overwrite it for as long as the current
-    /// pageserver runs. (Unless someone removes the file manually; never do that!)
-    fn pid_file(&self) -> PathBuf {
+    pub fn pid_file(&self) -> PathBuf {
         self.repo_path().join("pageserver.pid")
     }
 
-    pub fn start(&self, config_overrides: &[&str]) -> anyhow::Result<Child> {
-        self.start_node(config_overrides, false)
+    pub fn start(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
+        self.start_node(config_overrides, &self.repo_path(), false)
     }
 
-    fn pageserver_init(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
-        let datadir = self.repo_path();
-        let node_id = self.env.pageserver.id;
-        println!(
-            "Initializing pageserver node {} at '{}' in {:?}",
-            node_id,
-            self.pg_connection_config.raw_address(),
-            datadir
-        );
-        io::stdout().flush()?;
-
-        let datadir_path_str = datadir.to_str().with_context(|| {
-            format!("Cannot start pageserver node {node_id} in path that has no string representation: {datadir:?}")
-        })?;
-        let mut args = self.pageserver_basic_args(config_overrides, datadir_path_str);
-        args.push(Cow::Borrowed("--init"));
-
-        let init_output = Command::new(self.env.pageserver_bin())
-            .args(args.iter().map(Cow::as_ref))
-            .envs(self.pageserver_env_variables()?)
-            .output()
-            .with_context(|| format!("Failed to run pageserver init for node {node_id}"))?;
-
-        anyhow::ensure!(
-            init_output.status.success(),
-            "Pageserver init for node {} did not finish successfully, stdout: {}, stderr: {}",
-            node_id,
-            String::from_utf8_lossy(&init_output.stdout),
-            String::from_utf8_lossy(&init_output.stderr),
-        );
-
-        Ok(())
-    }
-
-    fn start_node(&self, config_overrides: &[&str], update_config: bool) -> anyhow::Result<Child> {
-        let mut overrides = self.neon_local_overrides();
-        overrides.extend(config_overrides.iter().map(|&c| c.to_owned()));
-
-        let datadir = self.repo_path();
-        print!(
-            "Starting pageserver node {} at '{}' in {:?}",
-            self.env.pageserver.id,
-            self.pg_connection_config.raw_address(),
-            datadir
-        );
-        io::stdout().flush()?;
-
-        let datadir_path_str = datadir.to_str().with_context(|| {
-            format!(
-                "Cannot start pageserver node {} in path that has no string representation: {:?}",
-                self.env.pageserver.id, datadir,
-            )
-        })?;
-        let mut args = self.pageserver_basic_args(config_overrides, datadir_path_str);
-        if update_config {
-            args.push(Cow::Borrowed("--update-config"));
-        }
-
-        background_process::start_process(
-            "pageserver",
-            &datadir,
-            &self.env.pageserver_bin(),
-            args.iter().map(Cow::as_ref),
-            self.pageserver_env_variables()?,
-            background_process::InitialPidFile::Expect(&self.pid_file()),
-            || match self.check_status() {
-                Ok(()) => Ok(true),
-                Err(PageserverHttpError::Transport(_)) => Ok(false),
-                Err(e) => Err(anyhow::anyhow!("Failed to check node status: {e}")),
-            },
-        )
-    }
-
-    fn pageserver_basic_args<'a>(
+    fn start_node(
         &self,
-        config_overrides: &'a [&'a str],
-        datadir_path_str: &'a str,
-    ) -> Vec<Cow<'a, str>> {
-        let mut args = vec![Cow::Borrowed("-D"), Cow::Borrowed(datadir_path_str)];
+        config_overrides: &[&str],
+        datadir: &Path,
+        update_config: bool,
+    ) -> anyhow::Result<()> {
+        println!(
+            "Starting pageserver at '{}' in '{}'",
+            connection_address(&self.pg_connection_config),
+            datadir.display()
+        );
+        io::stdout().flush()?;
 
-        let mut overrides = self.neon_local_overrides();
-        overrides.extend(config_overrides.iter().map(|&c| c.to_owned()));
-        for config_override in overrides {
-            args.push(Cow::Borrowed("-c"));
-            args.push(Cow::Owned(config_override));
+        let mut args = vec![
+            "-D",
+            datadir.to_str().with_context(|| {
+                format!(
+                    "Datadir path '{}' cannot be represented as a unicode string",
+                    datadir.display()
+                )
+            })?,
+        ];
+
+        if update_config {
+            args.push("--update-config");
         }
 
-        args
-    }
+        for config_override in config_overrides {
+            args.extend(["-c", config_override]);
+        }
 
-    fn pageserver_env_variables(&self) -> anyhow::Result<Vec<(String, String)>> {
-        Ok(if self.env.pageserver.auth_type != AuthType::Trust {
-            // Generate a token to connect from the pageserver to a safekeeper
-            let token = self
-                .env
-                .generate_auth_token(&Claims::new(None, Scope::SafekeeperData))?;
-            vec![("NEON_AUTH_TOKEN".to_owned(), token)]
-        } else {
-            Vec::new()
-        })
+        let mut cmd = Command::new(self.env.pageserver_bin()?);
+        let mut filled_cmd = fill_rust_env_vars(cmd.args(&args).arg("--daemonize"));
+        filled_cmd = fill_aws_secrets_vars(filled_cmd);
+
+        if !filled_cmd.status()?.success() {
+            bail!(
+                "Pageserver failed to start. See console output and '{}' for details.",
+                datadir.join("pageserver.log").display()
+            );
+        }
+
+        // It takes a while for the page server to start up. Wait until it is
+        // open for business.
+        const RETRIES: i8 = 15;
+        for retries in 1..RETRIES {
+            match self.check_status() {
+                Ok(()) => {
+                    println!("\nPageserver started");
+                    return Ok(());
+                }
+                Err(err) => {
+                    match err {
+                        PageserverHttpError::Transport(err) => {
+                            if err.is_connect() && retries < 5 {
+                                print!(".");
+                                io::stdout().flush().unwrap();
+                            } else {
+                                if retries == 5 {
+                                    println!() // put a line break after dots for second message
+                                }
+                                println!("Pageserver not responding yet, err {err} retrying ({retries})...");
+                            }
+                        }
+                        PageserverHttpError::Response(msg) => {
+                            bail!("pageserver failed to start: {msg} ")
+                        }
+                    }
+                    thread::sleep(Duration::from_secs(1));
+                }
+            }
+        }
+        bail!("pageserver failed to start in {RETRIES} seconds");
     }
 
     ///
@@ -267,18 +288,69 @@ impl PageServerNode {
     /// If the server is not running, returns success
     ///
     pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
-        background_process::stop_process(immediate, "pageserver", &self.pid_file())
+        let pid_file = self.pid_file();
+        if !pid_file.exists() {
+            println!("Pageserver is already stopped");
+            return Ok(());
+        }
+        let pid = Pid::from_raw(read_pidfile(&pid_file)?);
+
+        let sig = if immediate {
+            print!("Stopping pageserver immediately..");
+            Signal::SIGQUIT
+        } else {
+            print!("Stopping pageserver gracefully..");
+            Signal::SIGTERM
+        };
+        io::stdout().flush().unwrap();
+        match kill(pid, sig) {
+            Ok(_) => (),
+            Err(Errno::ESRCH) => {
+                println!("Pageserver with pid {pid} does not exist, but a PID file was found");
+                return Ok(());
+            }
+            Err(err) => bail!(
+                "Failed to send signal to pageserver with pid {pid}: {}",
+                err.desc()
+            ),
+        }
+
+        // Wait until process is gone
+        for i in 0..600 {
+            let signal = None; // Send no signal, just get the error code
+            match kill(pid, signal) {
+                Ok(_) => (), // Process exists, keep waiting
+                Err(Errno::ESRCH) => {
+                    // Process not found, we're done
+                    println!("done!");
+                    return Ok(());
+                }
+                Err(err) => bail!(
+                    "Failed to send signal to pageserver with pid {}: {}",
+                    pid,
+                    err.desc()
+                ),
+            };
+
+            if i % 10 == 0 {
+                print!(".");
+                io::stdout().flush().unwrap();
+            }
+            thread::sleep(Duration::from_millis(100));
+        }
+
+        bail!("Failed to stop pageserver with pid {pid}");
     }
 
     pub fn page_server_psql(&self, sql: &str) -> Vec<postgres::SimpleQueryMessage> {
-        let mut client = self.pg_connection_config.connect_no_tls().unwrap();
+        let mut client = self.pg_connection_config.connect(NoTls).unwrap();
 
         println!("Pageserver query: '{sql}'");
         client.simple_query(sql).unwrap()
     }
 
     pub fn page_server_psql_client(&self) -> result::Result<postgres::Client, postgres::Error> {
-        self.pg_connection_config.connect_no_tls()
+        self.pg_connection_config.connect(NoTls)
     }
 
     fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
@@ -347,11 +419,6 @@ impl PageServerNode {
                 .map(|x| x.parse::<NonZeroU64>())
                 .transpose()
                 .context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
-            trace_read_requests: settings
-                .remove("trace_read_requests")
-                .map(|x| x.parse::<bool>())
-                .transpose()
-                .context("Failed to parse 'trace_read_requests' as bool")?,
         };
         if !settings.is_empty() {
             bail!("Unrecognized tenant settings: {settings:?}")
@@ -414,11 +481,6 @@ impl PageServerNode {
                     .map(|x| x.parse::<NonZeroU64>())
                     .transpose()
                     .context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
-                trace_read_requests: settings
-                    .get("trace_read_requests")
-                    .map(|x| x.parse::<bool>())
-                    .transpose()
-                    .context("Failed to parse 'trace_read_requests' as bool")?,
             })
             .send()?
             .error_from_body()?;
@@ -487,7 +549,7 @@ impl PageServerNode {
         pg_wal: Option<(Lsn, PathBuf)>,
         pg_version: u32,
     ) -> anyhow::Result<()> {
-        let mut client = self.pg_connection_config.connect_no_tls().unwrap();
+        let mut client = self.pg_connection_config.connect(NoTls).unwrap();
 
         // Init base reader
         let (start_lsn, base_tarfile_path) = base;
90 deny.toml
@@ -1,90 +0,0 @@
-# This file was auto-generated using `cargo deny init`.
-# cargo-deny is a cargo plugin that lets you lint your project's
-# dependency graph to ensure all your dependencies conform
-# to your expectations and requirements.
-
-# Root options
-targets = []
-all-features = false
-no-default-features = false
-feature-depth = 1
-
-# This section is considered when running `cargo deny check advisories`
-# More documentation for the advisories section can be found here:
-# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html
-[advisories]
-db-urls = ["https://github.com/rustsec/advisory-db"]
-vulnerability = "deny"
-unmaintained = "warn"
-yanked = "warn"
-notice = "warn"
-ignore = []
-
-# This section is considered when running `cargo deny check licenses`
-# More documentation for the licenses section can be found here:
-# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
-[licenses]
-unlicensed = "deny"
-allow = [
-    "Apache-2.0",
-    "Artistic-2.0",
-    "BSD-2-Clause",
-    "BSD-3-Clause",
-    "ISC",
-    "MIT",
-    "MPL-2.0",
-    "OpenSSL",
-    "Unicode-DFS-2016",
-]
-deny = []
-copyleft = "warn"
-allow-osi-fsf-free = "neither"
-default = "deny"
-confidence-threshold = 0.8
-exceptions = [
-    # Zlib license has some restrictions if we decide to change sth
-    { allow = ["Zlib"], name = "const_format_proc_macros", version = "*" },
-    { allow = ["Zlib"], name = "const_format", version = "*" },
-]
-
-[[licenses.clarify]]
-name = "ring"
-version = "*"
-expression = "MIT AND ISC AND OpenSSL"
-license-files = [
-    { path = "LICENSE", hash = 0xbd0eed23 }
-]
-
-[licenses.private]
-ignore = true
-registries = []
-
-# This section is considered when running `cargo deny check bans`.
-# More documentation about the 'bans' section can be found here:
-# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html
-[bans]
-multiple-versions = "warn"
-wildcards = "allow"
-highlight = "all"
-workspace-default-features = "allow"
-external-default-features = "allow"
-allow = []
-deny = []
-skip = []
-skip-tree = []
-
-# This section is considered when running `cargo deny check sources`.
-# More documentation about the 'sources' section can be found here:
-# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html
-[sources]
-unknown-registry = "warn"
-unknown-git = "warn"
-allow-registry = ["https://github.com/rust-lang/crates.io-index"]
-allow-git = []
-
-[sources.allow-org]
-github = [
-    "neondatabase",
-]
-gitlab = []
-bitbucket = []
Some files were not shown because too many files have changed in this diff.