Compare commits


1 Commit

Author: Arseny Sher
SHA1: cfd78950e2
Message: basic sk bench of pgbench init with perf fixtures
Date: 2024-01-30 14:24:27 +03:00
557 changed files with 16521 additions and 53851 deletions

View File

@@ -1,27 +1,27 @@
* *
# Files
!Cargo.lock
!Cargo.toml
!Makefile
!rust-toolchain.toml !rust-toolchain.toml
!scripts/combine_control_files.py !Cargo.toml
!scripts/ninstall.sh !Cargo.lock
!vm-cgconfig.conf !Makefile
# Directories
!.cargo/ !.cargo/
!.config/ !.config/
!compute_tools/
!control_plane/ !control_plane/
!compute_tools/
!libs/ !libs/
!neon_local/
!pageserver/ !pageserver/
!pgxn/ !pgxn/
!proxy/ !proxy/
!s3_scrubber/
!safekeeper/ !safekeeper/
!s3_scrubber/
!storage_broker/ !storage_broker/
!trace/ !trace/
!vendor/postgres-*/ !vendor/postgres-v14/
!vendor/postgres-v15/
!vendor/postgres-v16/
!workspace_hack/ !workspace_hack/
!neon_local/
!scripts/ninstall.sh
!scripts/combine_control_files.py
!vm-cgconfig.conf

View File

@@ -16,9 +16,9 @@ assignees: ''
## Implementation ideas ## Implementation ideas
## Tasks
```[tasklist] ```[tasklist]
- [ ] Example Task ### Tasks
``` ```

View File

@@ -4,8 +4,6 @@ self-hosted-runner:
- dev - dev
- gen3 - gen3
- large - large
# Remove `macos-14` from the list after https://github.com/rhysd/actionlint/pull/392 is merged.
- macos-14
- small - small
- us-east-2 - us-east-2
config-variables: config-variables:
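
Reassembled from the base-side (first) cells of the hunk above, and assuming actionlint's usual labels: key under self-hosted-runner: (only the self-hosted-runner: key itself is visible, in the hunk header), the runner allow-list on the base side reads roughly:

self-hosted-runner:
  labels:
    - dev
    - gen3
    - large
    # Remove `macos-14` from the list after https://github.com/rhysd/actionlint/pull/392 is merged.
    - macos-14
    - small
    - us-east-2

The head side of the hunk simply lacks the macos-14 entry and its accompanying comment.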

View File

@@ -39,7 +39,7 @@ runs:
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true) PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
if [ "${PR_NUMBER}" != "null" ]; then if [ "${PR_NUMBER}" != "null" ]; then
BRANCH_OR_PR=pr-${PR_NUMBER} BRANCH_OR_PR=pr-${PR_NUMBER}
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || [ "${GITHUB_REF_NAME}" = "release-proxy" ]; then elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ]; then
# Shortcut for special branches # Shortcut for special branches
BRANCH_OR_PR=${GITHUB_REF_NAME} BRANCH_OR_PR=${GITHUB_REF_NAME}
else else
@@ -59,7 +59,7 @@ runs:
BUCKET: neon-github-public-dev BUCKET: neon-github-public-dev
# TODO: We can replace with a special docker image with Java and Allure pre-installed # TODO: We can replace with a special docker image with Java and Allure pre-installed
- uses: actions/setup-java@v4 - uses: actions/setup-java@v3
with: with:
distribution: 'temurin' distribution: 'temurin'
java-version: '17' java-version: '17'
@@ -76,8 +76,8 @@ runs:
rm -f ${ALLURE_ZIP} rm -f ${ALLURE_ZIP}
fi fi
env: env:
ALLURE_VERSION: 2.27.0 ALLURE_VERSION: 2.24.0
ALLURE_ZIP_SHA256: b071858fb2fa542c65d8f152c5c40d26267b2dfb74df1f1608a589ecca38e777 ALLURE_ZIP_SHA256: 60b1d6ce65d9ef24b23cf9c2c19fd736a123487c38e54759f1ed1a7a77353c90
# Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this # Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this
- name: Acquire lock - name: Acquire lock
@@ -179,11 +179,22 @@ runs:
aws s3 rm "s3://${BUCKET}/${LOCK_FILE}" aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
fi fi
- name: Cache poetry deps - name: Store Allure test stat in the DB
uses: actions/cache@v4 if: ${{ !cancelled() && inputs.store-test-results-into-db == 'true' }}
with: shell: bash -euxo pipefail {0}
path: ~/.cache/pypoetry/virtualenvs env:
key: v2-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }} COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
REPORT_JSON_URL: ${{ steps.generate-report.outputs.report-json-url }}
run: |
export DATABASE_URL=${REGRESS_TEST_RESULT_CONNSTR}
./scripts/pysync
poetry run python3 scripts/ingest_regress_test_result.py \
--revision ${COMMIT_SHA} \
--reference ${GITHUB_REF} \
--build-type unified \
--ingest ${WORKDIR}/report/data/suites.json
- name: Store Allure test stat in the DB (new) - name: Store Allure test stat in the DB (new)
if: ${{ !cancelled() && inputs.store-test-results-into-db == 'true' }} if: ${{ !cancelled() && inputs.store-test-results-into-db == 'true' }}
@@ -215,7 +226,7 @@ runs:
rm -rf ${WORKDIR} rm -rf ${WORKDIR}
fi fi
- uses: actions/github-script@v7 - uses: actions/github-script@v6
if: always() if: always()
env: env:
REPORT_URL: ${{ steps.generate-report.outputs.report-url }} REPORT_URL: ${{ steps.generate-report.outputs.report-url }}
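
On the head side of the last hunk above, the place of the poetry-cache step is taken by a step that ingests the Allure stats directly into the database. Pieced together from the head-side cells, with only indentation reconstructed, that step reads:

- name: Store Allure test stat in the DB
  if: ${{ !cancelled() && inputs.store-test-results-into-db == 'true' }}
  shell: bash -euxo pipefail {0}
  env:
    COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
    REPORT_JSON_URL: ${{ steps.generate-report.outputs.report-json-url }}
  run: |
    export DATABASE_URL=${REGRESS_TEST_RESULT_CONNSTR}
    ./scripts/pysync
    poetry run python3 scripts/ingest_regress_test_result.py \
      --revision ${COMMIT_SHA} \
      --reference ${GITHUB_REF} \
      --build-type unified \
      --ingest ${WORKDIR}/report/data/suites.json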

View File

@@ -19,7 +19,7 @@ runs:
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true) PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
if [ "${PR_NUMBER}" != "null" ]; then if [ "${PR_NUMBER}" != "null" ]; then
BRANCH_OR_PR=pr-${PR_NUMBER} BRANCH_OR_PR=pr-${PR_NUMBER}
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || [ "${GITHUB_REF_NAME}" = "release-proxy" ]; then elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ]; then
# Shortcut for special branches # Shortcut for special branches
BRANCH_OR_PR=${GITHUB_REF_NAME} BRANCH_OR_PR=${GITHUB_REF_NAME}
else else

View File

@@ -44,10 +44,6 @@ inputs:
description: 'Postgres version to use for tests' description: 'Postgres version to use for tests'
required: false required: false
default: 'v14' default: 'v14'
benchmark_durations:
description: 'benchmark durations JSON'
required: false
default: '{}'
runs: runs:
using: "composite" using: "composite"
@@ -80,16 +76,17 @@ runs:
- name: Checkout - name: Checkout
if: inputs.needs_postgres_source == 'true' if: inputs.needs_postgres_source == 'true'
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
submodules: true submodules: true
fetch-depth: 1 fetch-depth: 1
- name: Cache poetry deps - name: Cache poetry deps
uses: actions/cache@v4 id: cache_poetry
uses: actions/cache@v3
with: with:
path: ~/.cache/pypoetry/virtualenvs path: ~/.cache/pypoetry/virtualenvs
key: v2-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }} key: v1-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }}
- name: Install Python deps - name: Install Python deps
shell: bash -euxo pipefail {0} shell: bash -euxo pipefail {0}
@@ -163,7 +160,7 @@ runs:
# We use pytest-split plugin to run benchmarks in parallel on different CI runners # We use pytest-split plugin to run benchmarks in parallel on different CI runners
if [ "${TEST_SELECTION}" = "test_runner/performance" ] && [ "${{ inputs.build_type }}" != "remote" ]; then if [ "${TEST_SELECTION}" = "test_runner/performance" ] && [ "${{ inputs.build_type }}" != "remote" ]; then
mkdir -p $TEST_OUTPUT mkdir -p $TEST_OUTPUT
echo '${{ inputs.benchmark_durations || '{}' }}' > $TEST_OUTPUT/benchmark_durations.json poetry run ./scripts/benchmark_durations.py "${TEST_RESULT_CONNSTR}" --days 10 --output "$TEST_OUTPUT/benchmark_durations.json"
EXTRA_PARAMS="--durations-path $TEST_OUTPUT/benchmark_durations.json $EXTRA_PARAMS" EXTRA_PARAMS="--durations-path $TEST_OUTPUT/benchmark_durations.json $EXTRA_PARAMS"
fi fi
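
On the base side of the last hunk, the benchmark durations come from the action's benchmark_durations input rather than from a live database query. The base-side cells reassemble into this shell fragment (only line breaks and indentation reconstructed):

# We use pytest-split plugin to run benchmarks in parallel on different CI runners
if [ "${TEST_SELECTION}" = "test_runner/performance" ] && [ "${{ inputs.build_type }}" != "remote" ]; then
  mkdir -p $TEST_OUTPUT
  echo '${{ inputs.benchmark_durations || '{}' }}' > $TEST_OUTPUT/benchmark_durations.json
  EXTRA_PARAMS="--durations-path $TEST_OUTPUT/benchmark_durations.json $EXTRA_PARAMS"
fi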

View File

@@ -16,14 +16,7 @@ concurrency:
cancel-in-progress: ${{ github.event_name == 'pull_request' }} cancel-in-progress: ${{ github.event_name == 'pull_request' }}
jobs: jobs:
check-permissions:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'run-no-ci') }}
uses: ./.github/workflows/check-permissions.yml
with:
github-event-name: ${{ github.event_name}}
actionlint: actionlint:
needs: [ check-permissions ]
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4

View File

@@ -64,7 +64,7 @@ jobs:
steps: steps:
- run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run" - run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run"
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
ref: main ref: main
token: ${{ secrets.CI_ACCESS_TOKEN }} token: ${{ secrets.CI_ACCESS_TOKEN }}
@@ -93,7 +93,6 @@ jobs:
--body-file "body.md" \ --body-file "body.md" \
--head "${BRANCH}" \ --head "${BRANCH}" \
--base "main" \ --base "main" \
--label "run-e2e-tests-in-draft" \
--draft --draft
fi fi

View File

@@ -62,11 +62,11 @@ jobs:
runs-on: [ self-hosted, us-east-2, x64 ] runs-on: [ self-hosted, us-east-2, x64 ]
container: container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init options: --init
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- name: Download Neon artifact - name: Download Neon artifact
uses: ./.github/actions/download uses: ./.github/actions/download
@@ -214,14 +214,14 @@ jobs:
runs-on: [ self-hosted, us-east-2, x64 ] runs-on: [ self-hosted, us-east-2, x64 ]
container: container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init options: --init
# Increase timeout to 8h, default timeout is 6h # Increase timeout to 8h, default timeout is 6h
timeout-minutes: 480 timeout-minutes: 480
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- name: Download Neon artifact - name: Download Neon artifact
uses: ./.github/actions/download uses: ./.github/actions/download
@@ -362,11 +362,11 @@ jobs:
runs-on: [ self-hosted, us-east-2, x64 ] runs-on: [ self-hosted, us-east-2, x64 ]
container: container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init options: --init
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- name: Download Neon artifact - name: Download Neon artifact
uses: ./.github/actions/download uses: ./.github/actions/download
@@ -461,11 +461,11 @@ jobs:
runs-on: [ self-hosted, us-east-2, x64 ] runs-on: [ self-hosted, us-east-2, x64 ]
container: container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init options: --init
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- name: Download Neon artifact - name: Download Neon artifact
uses: ./.github/actions/download uses: ./.github/actions/download
@@ -558,11 +558,11 @@ jobs:
runs-on: [ self-hosted, us-east-2, x64 ] runs-on: [ self-hosted, us-east-2, x64 ]
container: container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init options: --init
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- name: Download Neon artifact - name: Download Neon artifact
uses: ./.github/actions/download uses: ./.github/actions/download

View File

@@ -1,105 +0,0 @@
name: Build build-tools image
on:
workflow_call:
inputs:
image-tag:
description: "build-tools image tag"
required: true
type: string
outputs:
image-tag:
description: "build-tools tag"
value: ${{ inputs.image-tag }}
image:
description: "build-tools image"
value: neondatabase/build-tools:${{ inputs.image-tag }}
defaults:
run:
shell: bash -euo pipefail {0}
concurrency:
group: build-build-tools-image-${{ inputs.image-tag }}
# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
permissions: {}
jobs:
check-image:
uses: ./.github/workflows/check-build-tools-image.yml
# This job uses older version of GitHub Actions because it's run on gen2 runners, which don't support node 20 (for newer versions)
build-image:
needs: [ check-image ]
if: needs.check-image.outputs.found == 'false'
strategy:
matrix:
arch: [ x64, arm64 ]
runs-on: ${{ fromJson(format('["self-hosted", "dev", "{0}"]', matrix.arch)) }}
env:
IMAGE_TAG: ${{ inputs.image-tag }}
steps:
- name: Check `input.tag` is correct
env:
INPUTS_IMAGE_TAG: ${{ inputs.image-tag }}
CHECK_IMAGE_TAG : ${{ needs.check-image.outputs.image-tag }}
run: |
if [ "${INPUTS_IMAGE_TAG}" != "${CHECK_IMAGE_TAG}" ]; then
echo "'inputs.image-tag' (${INPUTS_IMAGE_TAG}) does not match the tag of the latest build-tools image 'inputs.image-tag' (${CHECK_IMAGE_TAG})"
exit 1
fi
- uses: actions/checkout@v3
# Use custom DOCKER_CONFIG directory to avoid conflicts with default settings
# The default value is ~/.docker
- name: Set custom docker config directory
run: |
mkdir -p /tmp/.docker-custom
echo DOCKER_CONFIG=/tmp/.docker-custom >> $GITHUB_ENV
- uses: docker/setup-buildx-action@v2
- uses: docker/login-action@v2
with:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
- uses: docker/build-push-action@v4
with:
context: .
provenance: false
push: true
pull: true
file: Dockerfile.build-tools
cache-from: type=registry,ref=neondatabase/build-tools:cache-${{ matrix.arch }}
cache-to: type=registry,ref=neondatabase/build-tools:cache-${{ matrix.arch }},mode=max
tags: neondatabase/build-tools:${{ inputs.image-tag }}-${{ matrix.arch }}
- name: Remove custom docker config directory
run: |
rm -rf /tmp/.docker-custom
merge-images:
needs: [ build-image ]
runs-on: ubuntu-latest
env:
IMAGE_TAG: ${{ inputs.image-tag }}
steps:
- uses: docker/login-action@v3
with:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
- name: Create multi-arch image
run: |
docker buildx imagetools create -t neondatabase/build-tools:${IMAGE_TAG} \
neondatabase/build-tools:${IMAGE_TAG}-x64 \
neondatabase/build-tools:${IMAGE_TAG}-arm64
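
For context, the base side of build_and_test.yml later in this comparison calls this reusable workflow roughly as follows, feeding it the tag computed by check-build-tools-image (reassembled from those base-side cells):

check-build-tools-image:
  needs: [ check-permissions ]
  uses: ./.github/workflows/check-build-tools-image.yml

build-build-tools-image:
  needs: [ check-build-tools-image ]
  uses: ./.github/workflows/build-build-tools-image.yml
  with:
    image-tag: ${{ needs.check-build-tools-image.outputs.image-tag }}
  secrets: inherit

Downstream jobs on that side then reference the result as image: ${{ needs.build-build-tools-image.outputs.image }}.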

View File

@@ -0,0 +1,124 @@
name: Build and Push Docker Image
on:
workflow_call:
inputs:
dockerfile-path:
required: true
type: string
image-name:
required: true
type: string
outputs:
build-tools-tag:
description: "tag generated for build tools"
value: ${{ jobs.tag.outputs.build-tools-tag }}
jobs:
check-if-build-tools-dockerfile-changed:
runs-on: ubuntu-latest
outputs:
docker_file_changed: ${{ steps.dockerfile.outputs.docker_file_changed }}
steps:
- name: Check if Dockerfile.buildtools has changed
id: dockerfile
run: |
if [[ "$GITHUB_EVENT_NAME" != "pull_request" ]]; then
echo "docker_file_changed=false" >> $GITHUB_OUTPUT
exit
fi
updated_files=$(gh pr --repo neondatabase/neon diff ${{ github.event.pull_request.number }} --name-only)
if [[ $updated_files == *"Dockerfile.buildtools"* ]]; then
echo "docker_file_changed=true" >> $GITHUB_OUTPUT
fi
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
tag:
runs-on: ubuntu-latest
needs: [ check-if-build-tools-dockerfile-changed ]
outputs:
build-tools-tag: ${{steps.buildtools-tag.outputs.image_tag}}
steps:
- name: Get buildtools tag
env:
DOCKERFILE_CHANGED: ${{ needs.check-if-build-tools-dockerfile-changed.outputs.docker_file_changed }}
run: |
if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]] && [[ "${DOCKERFILE_CHANGED}" == "true" ]]; then
IMAGE_TAG=$GITHUB_RUN_ID
else
IMAGE_TAG=pinned
fi
echo "image_tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
shell: bash
id: buildtools-tag
kaniko:
if: needs.check-if-build-tools-dockerfile-changed.outputs.docker_file_changed == 'true'
needs: [ tag, check-if-build-tools-dockerfile-changed ]
runs-on: [ self-hosted, dev, x64 ]
container: gcr.io/kaniko-project/executor:v1.7.0-debug
steps:
- name: Checkout
uses: actions/checkout@v1
- name: Configure ECR login
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
- name: Kaniko build
run: |
/kaniko/executor \
--reproducible \
--snapshotMode=redo \
--skip-unused-stages \
--dockerfile ${{ inputs.dockerfile-path }} \
--cache=true \
--cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache \
--destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/${{ inputs.image-name }}:${{ needs.tag.outputs.build-tools-tag }}-amd64
kaniko-arm:
if: needs.check-if-build-tools-dockerfile-changed.outputs.docker_file_changed == 'true'
needs: [ tag, check-if-build-tools-dockerfile-changed ]
runs-on: [ self-hosted, dev, arm64 ]
container: gcr.io/kaniko-project/executor:v1.7.0-debug
steps:
- name: Checkout
uses: actions/checkout@v1
- name: Configure ECR login
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
- name: Kaniko build
run: |
/kaniko/executor \
--reproducible \
--snapshotMode=redo \
--skip-unused-stages \
--dockerfile ${{ inputs.dockerfile-path }} \
--cache=true \
--cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache \
--destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/${{ inputs.image-name }}:${{ needs.tag.outputs.build-tools-tag }}-arm64
manifest:
if: needs.check-if-build-tools-dockerfile-changed.outputs.docker_file_changed == 'true'
name: 'manifest'
runs-on: [ self-hosted, dev, x64 ]
needs:
- tag
- kaniko
- kaniko-arm
- check-if-build-tools-dockerfile-changed
steps:
- name: Create manifest
run: |
docker manifest create 369495373322.dkr.ecr.eu-central-1.amazonaws.com/${{ inputs.image-name }}:${{ needs.tag.outputs.build-tools-tag }} \
--amend 369495373322.dkr.ecr.eu-central-1.amazonaws.com/${{ inputs.image-name }}:${{ needs.tag.outputs.build-tools-tag }}-amd64 \
--amend 369495373322.dkr.ecr.eu-central-1.amazonaws.com/${{ inputs.image-name }}:${{ needs.tag.outputs.build-tools-tag }}-arm64
- name: Push manifest
run: docker manifest push 369495373322.dkr.ecr.eu-central-1.amazonaws.com/${{ inputs.image-name }}:${{ needs.tag.outputs.build-tools-tag }}

View File

@@ -5,7 +5,6 @@ on:
branches: branches:
- main - main
- release - release
- release-proxy
pull_request: pull_request:
defaults: defaults:
@@ -23,14 +22,29 @@ env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
# A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix # A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} E2E_CONCURRENCY_GROUP: ${{ github.repository }}-${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
jobs: jobs:
check-permissions: check-permissions:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'run-no-ci') }} runs-on: ubuntu-latest
uses: ./.github/workflows/check-permissions.yml
with: steps:
github-event-name: ${{ github.event_name}} - name: Disallow PRs from forks
if: |
github.event_name == 'pull_request' &&
github.event.pull_request.head.repo.full_name != github.repository
run: |
if [ "${{ contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.pull_request.author_association) }}" = "true" ]; then
MESSAGE="Please create a PR from a branch of ${GITHUB_REPOSITORY} instead of a fork"
else
MESSAGE="The PR should be reviewed and labelled with 'approved-for-ci-run' to trigger a CI run"
fi
echo >&2 "We don't run CI for PRs from forks"
echo >&2 "${MESSAGE}"
exit 1
cancel-previous-e2e-tests: cancel-previous-e2e-tests:
needs: [ check-permissions ] needs: [ check-permissions ]
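
For the E2E_CONCURRENCY_GROUP change near the top of this hunk: the base side keys the group on a literal e2e-tests string rather than ${{ github.workflow }}. Purely as an illustration (the branch name and sha below are stand-ins, not values from this run), the expression would expand along the lines of:

E2E_CONCURRENCY_GROUP: neondatabase/neon-e2e-tests-main-<full-commit-sha>   # push to main
E2E_CONCURRENCY_GROUP: neondatabase/neon-e2e-tests-<branch-name>-anysha     # any pull request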
@@ -55,7 +69,7 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -68,8 +82,6 @@ jobs:
echo "tag=$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT echo "tag=$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
echo "tag=release-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT echo "tag=release-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
echo "tag=release-proxy-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
else else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'" echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
echo "tag=$GITHUB_RUN_ID" >> $GITHUB_OUTPUT echo "tag=$GITHUB_RUN_ID" >> $GITHUB_OUTPUT
@@ -77,39 +89,34 @@ jobs:
shell: bash shell: bash
id: build-tag id: build-tag
check-build-tools-image: build-buildtools-image:
needs: [ check-permissions ] needs: [ check-permissions ]
uses: ./.github/workflows/check-build-tools-image.yml uses: ./.github/workflows/build_and_push_docker_image.yml
build-build-tools-image:
needs: [ check-build-tools-image ]
uses: ./.github/workflows/build-build-tools-image.yml
with: with:
image-tag: ${{ needs.check-build-tools-image.outputs.image-tag }} dockerfile-path: Dockerfile.buildtools
image-name: build-tools
secrets: inherit secrets: inherit
check-codestyle-python: check-codestyle-python:
needs: [ check-permissions, build-build-tools-image ] needs: [ check-permissions, build-buildtools-image ]
runs-on: [ self-hosted, gen3, small ] runs-on: [ self-hosted, gen3, small ]
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:${{ needs.build-buildtools-image.outputs.build-tools-tag }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init options: --init
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
submodules: false submodules: false
fetch-depth: 1 fetch-depth: 1
- name: Cache poetry deps - name: Cache poetry deps
uses: actions/cache@v4 id: cache_poetry
uses: actions/cache@v3
with: with:
path: ~/.cache/pypoetry/virtualenvs path: ~/.cache/pypoetry/virtualenvs
key: v2-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }} key: v1-codestyle-python-deps-${{ hashFiles('poetry.lock') }}
- name: Install Python deps - name: Install Python deps
run: ./scripts/pysync run: ./scripts/pysync
@@ -124,18 +131,15 @@ jobs:
run: poetry run mypy . run: poetry run mypy .
check-codestyle-rust: check-codestyle-rust:
needs: [ check-permissions, build-build-tools-image ] needs: [ check-permissions, build-buildtools-image ]
runs-on: [ self-hosted, gen3, small ] runs-on: [ self-hosted, gen3, large ]
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:${{ needs.build-buildtools-image.outputs.build-tools-tag }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init options: --init
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
submodules: true submodules: true
fetch-depth: 1 fetch-depth: 1
@@ -143,7 +147,7 @@ jobs:
# Disabled for now # Disabled for now
# - name: Restore cargo deps cache # - name: Restore cargo deps cache
# id: cache_cargo # id: cache_cargo
# uses: actions/cache@v4 # uses: actions/cache@v3
# with: # with:
# path: | # path: |
# !~/.cargo/registry/src # !~/.cargo/registry/src
@@ -194,13 +198,10 @@ jobs:
run: cargo deny check --hide-inclusion-graph run: cargo deny check --hide-inclusion-graph
build-neon: build-neon:
needs: [ check-permissions, tag, build-build-tools-image ] needs: [ check-permissions, tag, build-buildtools-image ]
runs-on: [ self-hosted, gen3, large ] runs-on: [ self-hosted, gen3, large ]
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:${{ needs.build-buildtools-image.outputs.build-tools-tag }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
# Raise locked memory limit for tokio-epoll-uring. # Raise locked memory limit for tokio-epoll-uring.
# On 5.10 LTS kernels < 5.10.162 (and generally mainline kernels < 5.12), # On 5.10 LTS kernels < 5.10.162 (and generally mainline kernels < 5.12),
# io_uring will account the memory of the CQ and SQ as locked. # io_uring will account the memory of the CQ and SQ as locked.
@@ -231,7 +232,7 @@ jobs:
done done
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
submodules: true submodules: true
fetch-depth: 1 fetch-depth: 1
@@ -253,7 +254,7 @@ jobs:
done done
if [ "${FAILED}" = "true" ]; then if [ "${FAILED}" = "true" ]; then
echo >&2 "Please update vendor/revisions.json if these changes are intentional" echo >&2 "Please update vendors/revisions.json if these changes are intentional"
exit 1 exit 1
fi fi
@@ -303,7 +304,7 @@ jobs:
# compressed crates. # compressed crates.
# - name: Cache cargo deps # - name: Cache cargo deps
# id: cache_cargo # id: cache_cargo
# uses: actions/cache@v4 # uses: actions/cache@v3
# with: # with:
# path: | # path: |
# ~/.cargo/registry/ # ~/.cargo/registry/
@@ -317,21 +318,21 @@ jobs:
- name: Cache postgres v14 build - name: Cache postgres v14 build
id: cache_pg_14 id: cache_pg_14
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: pg_install/v14 path: pg_install/v14
key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v15 build - name: Cache postgres v15 build
id: cache_pg_15 id: cache_pg_15
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: pg_install/v15 path: pg_install/v15
key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v16 build - name: Cache postgres v16 build
id: cache_pg_16 id: cache_pg_16
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: pg_install/v16 path: pg_install/v16
key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
@@ -438,13 +439,10 @@ jobs:
uses: ./.github/actions/save-coverage-data uses: ./.github/actions/save-coverage-data
regress-tests: regress-tests:
needs: [ check-permissions, build-neon, build-build-tools-image, tag ] needs: [ check-permissions, build-neon, build-buildtools-image, tag ]
runs-on: [ self-hosted, gen3, large ] runs-on: [ self-hosted, gen3, large ]
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:${{ needs.build-buildtools-image.outputs.build-tools-tag }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
# for changed limits, see comments on `options:` earlier in this file # for changed limits, see comments on `options:` earlier in this file
options: --init --shm-size=512mb --ulimit memlock=67108864:67108864 options: --init --shm-size=512mb --ulimit memlock=67108864:67108864
strategy: strategy:
@@ -454,14 +452,13 @@ jobs:
pg_version: [ v14, v15, v16 ] pg_version: [ v14, v15, v16 ]
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
submodules: true submodules: true
fetch-depth: 1 fetch-depth: 1
- name: Pytest regression tests - name: Pytest regression tests
uses: ./.github/actions/run-python-test-set uses: ./.github/actions/run-python-test-set
timeout-minutes: 60
with: with:
build_type: ${{ matrix.build_type }} build_type: ${{ matrix.build_type }}
test_selection: regress test_selection: regress
@@ -475,60 +472,17 @@ jobs:
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }} TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
CHECK_ONDISK_DATA_COMPATIBILITY: nonempty CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
BUILD_TAG: ${{ needs.tag.outputs.build-tag }} BUILD_TAG: ${{ needs.tag.outputs.build-tag }}
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring PAGESERVER_VIRTUAL_FILE_IO_ENGINE: std-fs
PAGESERVER_GET_VECTORED_IMPL: vectored
# Temporary disable this step until we figure out why it's so flaky
# Ref https://github.com/neondatabase/neon/issues/4540
- name: Merge and upload coverage data - name: Merge and upload coverage data
if: | if: matrix.build_type == 'debug' && matrix.pg_version == 'v14'
false &&
matrix.build_type == 'debug' && matrix.pg_version == 'v14'
uses: ./.github/actions/save-coverage-data uses: ./.github/actions/save-coverage-data
get-benchmarks-durations:
outputs:
json: ${{ steps.get-benchmark-durations.outputs.json }}
needs: [ check-permissions, build-build-tools-image ]
runs-on: [ self-hosted, gen3, small ]
container:
image: ${{ needs.build-build-tools-image.outputs.image }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Cache poetry deps
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry/virtualenvs
key: v1-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }}
- name: Install Python deps
run: ./scripts/pysync
- name: get benchmark durations
id: get-benchmark-durations
env:
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
run: |
poetry run ./scripts/benchmark_durations.py "${TEST_RESULT_CONNSTR}" \
--days 10 \
--output /tmp/benchmark_durations.json
echo "json=$(jq --compact-output '.' /tmp/benchmark_durations.json)" >> $GITHUB_OUTPUT
benchmarks: benchmarks:
needs: [ check-permissions, build-neon, build-build-tools-image, get-benchmarks-durations ] needs: [ check-permissions, build-neon, build-buildtools-image ]
runs-on: [ self-hosted, gen3, small ] runs-on: [ self-hosted, gen3, small ]
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:${{ needs.build-buildtools-image.outputs.build-tools-tag }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
# for changed limits, see comments on `options:` earlier in this file # for changed limits, see comments on `options:` earlier in this file
options: --init --shm-size=512mb --ulimit memlock=67108864:67108864 options: --init --shm-size=512mb --ulimit memlock=67108864:67108864
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks') if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
@@ -536,11 +490,11 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
# the amount of groups (N) should be reflected in `extra_params: --splits N ...` # the amount of groups (N) should be reflected in `extra_params: --splits N ...`
pytest_split_group: [ 1, 2, 3, 4, 5 ] pytest_split_group: [ 1, 2, 3, 4 ]
build_type: [ release ] build_type: [ release ]
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Pytest benchmarks - name: Pytest benchmarks
uses: ./.github/actions/run-python-test-set uses: ./.github/actions/run-python-test-set
@@ -549,8 +503,7 @@ jobs:
test_selection: performance test_selection: performance
run_in_parallel: false run_in_parallel: false
save_perf_report: ${{ github.ref_name == 'main' }} save_perf_report: ${{ github.ref_name == 'main' }}
extra_params: --splits 5 --group ${{ matrix.pytest_split_group }} extra_params: --splits 4 --group ${{ matrix.pytest_split_group }}
benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
env: env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}" PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -560,19 +513,16 @@ jobs:
# while coverage is currently collected for the debug ones # while coverage is currently collected for the debug ones
create-test-report: create-test-report:
needs: [ check-permissions, regress-tests, coverage-report, benchmarks, build-build-tools-image ] needs: [ check-permissions, regress-tests, coverage-report, benchmarks, build-buildtools-image ]
if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }} if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
runs-on: [ self-hosted, gen3, small ] runs-on: [ self-hosted, gen3, small ]
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:${{ needs.build-buildtools-image.outputs.build-tools-tag }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init options: --init
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- name: Create Allure report - name: Create Allure report
if: ${{ !cancelled() }} if: ${{ !cancelled() }}
@@ -581,9 +531,10 @@ jobs:
with: with:
store-test-results-into-db: true store-test-results-into-db: true
env: env:
REGRESS_TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR }}
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }} REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
- uses: actions/github-script@v7 - uses: actions/github-script@v6
if: ${{ !cancelled() }} if: ${{ !cancelled() }}
with: with:
# Retry script for 5XX server errors: https://github.com/actions/github-script#retries # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
@@ -609,13 +560,10 @@ jobs:
}) })
coverage-report: coverage-report:
needs: [ check-permissions, regress-tests, build-build-tools-image ] needs: [ check-permissions, regress-tests, build-buildtools-image ]
runs-on: [ self-hosted, gen3, small ] runs-on: [ self-hosted, gen3, small ]
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:${{ needs.build-buildtools-image.outputs.build-tools-tag }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init options: --init
strategy: strategy:
fail-fast: false fail-fast: false
@@ -626,7 +574,7 @@ jobs:
coverage-json: ${{ steps.upload-coverage-report-new.outputs.summary-json }} coverage-json: ${{ steps.upload-coverage-report-new.outputs.summary-json }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
submodules: true submodules: true
fetch-depth: 0 fetch-depth: 0
@@ -661,6 +609,17 @@ jobs:
--input-objects=/tmp/coverage/binaries.list \ --input-objects=/tmp/coverage/binaries.list \
--format=lcov --format=lcov
- name: Upload coverage report
id: upload-coverage-report
env:
BUCKET: neon-github-public-dev
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
run: |
aws s3 cp --only-show-errors --recursive /tmp/coverage/report s3://${BUCKET}/code-coverage/${COMMIT_SHA}
REPORT_URL=https://${BUCKET}.s3.amazonaws.com/code-coverage/${COMMIT_SHA}/index.html
echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
- name: Build coverage report NEW - name: Build coverage report NEW
id: upload-coverage-report-new id: upload-coverage-report-new
env: env:
@@ -695,13 +654,23 @@ jobs:
REPORT_URL=https://${BUCKET}.s3.amazonaws.com/code-coverage/${COMMIT_SHA}/lcov/summary.json REPORT_URL=https://${BUCKET}.s3.amazonaws.com/code-coverage/${COMMIT_SHA}/lcov/summary.json
echo "summary-json=${REPORT_URL}" >> $GITHUB_OUTPUT echo "summary-json=${REPORT_URL}" >> $GITHUB_OUTPUT
- uses: actions/github-script@v7 - uses: actions/github-script@v6
env: env:
REPORT_URL: ${{ steps.upload-coverage-report.outputs.report-url }}
REPORT_URL_NEW: ${{ steps.upload-coverage-report-new.outputs.report-url }} REPORT_URL_NEW: ${{ steps.upload-coverage-report-new.outputs.report-url }}
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
with: with:
script: | script: |
const { REPORT_URL_NEW, COMMIT_SHA } = process.env const { REPORT_URL, REPORT_URL_NEW, COMMIT_SHA } = process.env
await github.rest.repos.createCommitStatus({
owner: context.repo.owner,
repo: context.repo.repo,
sha: `${COMMIT_SHA}`,
state: 'success',
target_url: `${REPORT_URL}`,
context: 'Code coverage report',
})
await github.rest.repos.createCommitStatus({ await github.rest.repos.createCommitStatus({
owner: context.repo.owner, owner: context.repo.owner,
@@ -713,146 +682,206 @@ jobs:
}) })
trigger-e2e-tests: trigger-e2e-tests:
if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'run-e2e-tests-in-draft') || github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' }}
needs: [ check-permissions, promote-images, tag ] needs: [ check-permissions, promote-images, tag ]
uses: ./.github/workflows/trigger-e2e-tests.yml runs-on: [ self-hosted, gen3, small ]
secrets: inherit container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
options: --init
steps:
- name: Set PR's status to pending and request a remote CI test
run: |
# For pull requests, GH Actions set "github.sha" variable to point at a fake merge commit
# but we need to use a real sha of a latest commit in the PR's branch for the e2e job,
# to place a job run status update later.
COMMIT_SHA=${{ github.event.pull_request.head.sha }}
# For non-PR kinds of runs, the above will produce an empty variable, pick the original sha value for those
COMMIT_SHA=${COMMIT_SHA:-${{ github.sha }}}
REMOTE_REPO="${{ github.repository_owner }}/cloud"
curl -f -X POST \
https://api.github.com/repos/${{ github.repository }}/statuses/$COMMIT_SHA \
-H "Accept: application/vnd.github.v3+json" \
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
--data \
"{
\"state\": \"pending\",
\"context\": \"neon-cloud-e2e\",
\"description\": \"[$REMOTE_REPO] Remote CI job is about to start\"
}"
curl -f -X POST \
https://api.github.com/repos/$REMOTE_REPO/actions/workflows/testing.yml/dispatches \
-H "Accept: application/vnd.github.v3+json" \
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
--data \
"{
\"ref\": \"main\",
\"inputs\": {
\"ci_job_name\": \"neon-cloud-e2e\",
\"commit_hash\": \"$COMMIT_SHA\",
\"remote_repo\": \"${{ github.repository }}\",
\"storage_image_tag\": \"${{ needs.tag.outputs.build-tag }}\",
\"compute_image_tag\": \"${{ needs.tag.outputs.build-tag }}\",
\"concurrency_group\": \"${{ env.E2E_CONCURRENCY_GROUP }}\"
}
}"
neon-image: neon-image:
needs: [ check-permissions, build-build-tools-image, tag ] needs: [ check-permissions, build-buildtools-image, tag ]
runs-on: [ self-hosted, gen3, large ] runs-on: [ self-hosted, gen3, large ]
container: gcr.io/kaniko-project/executor:v1.9.2-debug
defaults:
run:
shell: sh -eu {0}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v1 # v3 won't work with kaniko
with: with:
submodules: true submodules: true
fetch-depth: 0 fetch-depth: 0
# Use custom DOCKER_CONFIG directory to avoid conflicts with default settings - name: Configure ECR and Docker Hub login
# The default value is ~/.docker
- name: Set custom docker config directory
run: | run: |
mkdir -p .docker-custom DOCKERHUB_AUTH=$(echo -n "${{ secrets.NEON_DOCKERHUB_USERNAME }}:${{ secrets.NEON_DOCKERHUB_PASSWORD }}" | base64)
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV echo "::add-mask::${DOCKERHUB_AUTH}"
- uses: docker/setup-buildx-action@v3
- uses: docker/login-action@v3 cat <<-EOF > /kaniko/.docker/config.json
with: {
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} "auths": {
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} "https://index.docker.io/v1/": {
"auth": "${DOCKERHUB_AUTH}"
}
},
"credHelpers": {
"369495373322.dkr.ecr.eu-central-1.amazonaws.com": "ecr-login"
}
}
EOF
- uses: docker/login-action@v3 - name: Kaniko build neon
with: run:
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com /kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true
username: ${{ secrets.AWS_ACCESS_KEY_DEV }} --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache
password: ${{ secrets.AWS_SECRET_KEY_DEV }} --context .
--build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
--build-arg BUILD_TAG=${{ needs.tag.outputs.build-tag }}
--build-arg TAG=${{ needs.build-buildtools-image.outputs.build-tools-tag }}
--build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
--destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
--destination neondatabase/neon:${{needs.tag.outputs.build-tag}}
- uses: docker/build-push-action@v5 # Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
with: - name: Cleanup ECR folder
context: . run: rm -rf ~/.ecr
build-args: |
GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
BUILD_TAG=${{ needs.tag.outputs.build-tag }}
TAG=${{ needs.build-build-tools-image.outputs.image-tag }}
provenance: false
push: true
pull: true
file: Dockerfile
cache-from: type=registry,ref=neondatabase/neon:cache
cache-to: type=registry,ref=neondatabase/neon:cache,mode=max
tags: |
369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
neondatabase/neon:${{needs.tag.outputs.build-tag}}
- name: Remove custom docker config directory compute-tools-image:
if: always() runs-on: [ self-hosted, gen3, large ]
needs: [ check-permissions, build-buildtools-image, tag ]
container: gcr.io/kaniko-project/executor:v1.9.2-debug
defaults:
run:
shell: sh -eu {0}
steps:
- name: Checkout
uses: actions/checkout@v1 # v3 won't work with kaniko
- name: Configure ECR and Docker Hub login
run: | run: |
rm -rf .docker-custom DOCKERHUB_AUTH=$(echo -n "${{ secrets.NEON_DOCKERHUB_USERNAME }}:${{ secrets.NEON_DOCKERHUB_PASSWORD }}" | base64)
echo "::add-mask::${DOCKERHUB_AUTH}"
cat <<-EOF > /kaniko/.docker/config.json
{
"auths": {
"https://index.docker.io/v1/": {
"auth": "${DOCKERHUB_AUTH}"
}
},
"credHelpers": {
"369495373322.dkr.ecr.eu-central-1.amazonaws.com": "ecr-login"
}
}
EOF
- name: Kaniko build compute tools
run:
/kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true
--cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache
--context .
--build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
--build-arg BUILD_TAG=${{needs.tag.outputs.build-tag}}
--build-arg TAG=${{needs.build-buildtools-image.outputs.build-tools-tag}}
--build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
--dockerfile Dockerfile.compute-tools
--destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}}
--destination neondatabase/compute-tools:${{needs.tag.outputs.build-tag}}
# Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
- name: Cleanup ECR folder
run: rm -rf ~/.ecr
compute-node-image: compute-node-image:
needs: [ check-permissions, build-build-tools-image, tag ] needs: [ check-permissions, build-buildtools-image, tag ]
runs-on: [ self-hosted, gen3, large ] runs-on: [ self-hosted, gen3, large ]
container:
image: gcr.io/kaniko-project/executor:v1.9.2-debug
# Workaround for "Resolving download.osgeo.org (download.osgeo.org)... failed: Temporary failure in name resolution.""
# Should be prevented by https://github.com/neondatabase/neon/issues/4281
options: --add-host=download.osgeo.org:140.211.15.30
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
version: [ v14, v15, v16 ] version: [ v14, v15, v16 ]
defaults:
run:
shell: sh -eu {0}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v1 # v3 won't work with kaniko
with: with:
submodules: true submodules: true
fetch-depth: 0 fetch-depth: 0
# Use custom DOCKER_CONFIG directory to avoid conflicts with default settings - name: Configure ECR and Docker Hub login
# The default value is ~/.docker
- name: Set custom docker config directory
run: | run: |
mkdir -p .docker-custom DOCKERHUB_AUTH=$(echo -n "${{ secrets.NEON_DOCKERHUB_USERNAME }}:${{ secrets.NEON_DOCKERHUB_PASSWORD }}" | base64)
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV echo "::add-mask::${DOCKERHUB_AUTH}"
- uses: docker/setup-buildx-action@v3
with:
# Disable parallelism for docker buildkit.
# As we already build everything with `make -j$(nproc)`, running it in additional level of parallelisam blows up the Runner.
config-inline: |
[worker.oci]
max-parallelism = 1
- uses: docker/login-action@v3 cat <<-EOF > /kaniko/.docker/config.json
with: {
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} "auths": {
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} "https://index.docker.io/v1/": {
"auth": "${DOCKERHUB_AUTH}"
}
},
"credHelpers": {
"369495373322.dkr.ecr.eu-central-1.amazonaws.com": "ecr-login"
}
}
EOF
- uses: docker/login-action@v3 - name: Kaniko build compute node with extensions
with: run:
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com /kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true
username: ${{ secrets.AWS_ACCESS_KEY_DEV }} --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache
password: ${{ secrets.AWS_SECRET_KEY_DEV }} --context .
--build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
--build-arg PG_VERSION=${{ matrix.version }}
--build-arg BUILD_TAG=${{needs.tag.outputs.build-tag}}
--build-arg TAG=${{needs.build-buildtools-image.outputs.build-tools-tag}}
--build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
--dockerfile Dockerfile.compute-node
--destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
--destination neondatabase/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
--cleanup
- name: Build compute-node image # Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
uses: docker/build-push-action@v5 - name: Cleanup ECR folder
with: run: rm -rf ~/.ecr
context: .
build-args: |
GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
PG_VERSION=${{ matrix.version }}
BUILD_TAG=${{ needs.tag.outputs.build-tag }}
TAG=${{ needs.build-build-tools-image.outputs.image-tag }}
provenance: false
push: true
pull: true
file: Dockerfile.compute-node
cache-from: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache
cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache,mode=max
tags: |
369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
neondatabase/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
- name: Build compute-tools image
# compute-tools are Postgres independent, so build it only once
if: ${{ matrix.version == 'v16' }}
uses: docker/build-push-action@v5
with:
target: compute-tools-image
context: .
build-args: |
GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
BUILD_TAG=${{ needs.tag.outputs.build-tag }}
TAG=${{ needs.build-build-tools-image.outputs.image-tag }}
provenance: false
push: true
pull: true
file: Dockerfile.compute-node
tags: |
369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{ needs.tag.outputs.build-tag }}
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}
- name: Remove custom docker config directory
if: always()
run: |
rm -rf .docker-custom
vm-compute-node-image: vm-compute-node-image:
needs: [ check-permissions, tag, compute-node-image ] needs: [ check-permissions, tag, compute-node-image ]
@@ -865,7 +894,7 @@ jobs:
run: run:
shell: sh -eu {0} shell: sh -eu {0}
env: env:
VM_BUILDER_VERSION: v0.23.2 VM_BUILDER_VERSION: v0.21.0
steps: steps:
- name: Checkout - name: Checkout
@@ -896,12 +925,12 @@ jobs:
docker push 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} docker push 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
test-images: test-images:
needs: [ check-permissions, tag, neon-image, compute-node-image ] needs: [ check-permissions, tag, neon-image, compute-node-image, compute-tools-image ]
runs-on: [ self-hosted, gen3, small ] runs-on: [ self-hosted, gen3, small ]
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -930,8 +959,7 @@ jobs:
fi fi
- name: Verify docker-compose example - name: Verify docker-compose example
timeout-minutes: 20 run: env REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com TAG=${{needs.tag.outputs.build-tag}} ./docker-compose/docker_compose_test.sh
run: env TAG=${{needs.tag.outputs.build-tag}} ./docker-compose/docker_compose_test.sh
- name: Print logs and clean up - name: Print logs and clean up
if: always() if: always()
@@ -964,7 +992,9 @@ jobs:
crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} vm-compute-node-v16 crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} vm-compute-node-v16
- name: Add latest tag to images - name: Add latest tag to images
if: github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' if: |
(github.ref_name == 'main' || github.ref_name == 'release') &&
github.event_name != 'workflow_dispatch'
run: | run: |
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} latest crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} latest
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} latest crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} latest
@@ -976,7 +1006,9 @@ jobs:
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} latest crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} latest
- name: Push images to production ECR - name: Push images to production ECR
if: github.ref_name == 'main' || github.ref_name == 'release'|| github.ref_name == 'release-proxy' if: |
(github.ref_name == 'main' || github.ref_name == 'release') &&
github.event_name != 'workflow_dispatch'
run: | run: |
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/neon:latest crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/neon:latest
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest
@@ -1000,7 +1032,9 @@ jobs:
crane push vm-compute-node-v16 neondatabase/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} crane push vm-compute-node-v16 neondatabase/vm-compute-node-v16:${{needs.tag.outputs.build-tag}}
- name: Push latest tags to Docker Hub - name: Push latest tags to Docker Hub
if: github.ref_name == 'main' || github.ref_name == 'release'|| github.ref_name == 'release-proxy' if: |
(github.ref_name == 'main' || github.ref_name == 'release') &&
github.event_name != 'workflow_dispatch'
run: | run: |
crane tag neondatabase/neon:${{needs.tag.outputs.build-tag}} latest crane tag neondatabase/neon:${{needs.tag.outputs.build-tag}} latest
crane tag neondatabase/compute-tools:${{needs.tag.outputs.build-tag}} latest crane tag neondatabase/compute-tools:${{needs.tag.outputs.build-tag}} latest
@@ -1090,7 +1124,7 @@ jobs:
deploy: deploy:
needs: [ check-permissions, promote-images, tag, regress-tests, trigger-custom-extensions-build-and-wait ] needs: [ check-permissions, promote-images, tag, regress-tests, trigger-custom-extensions-build-and-wait ]
if: github.ref_name == 'main' || github.ref_name == 'release'|| github.ref_name == 'release-proxy' if: ( github.ref_name == 'main' || github.ref_name == 'release' ) && github.event_name != 'workflow_dispatch'
runs-on: [ self-hosted, gen3, small ] runs-on: [ self-hosted, gen3, small ]
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
@@ -1110,7 +1144,7 @@ jobs:
done done
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
submodules: false submodules: false
fetch-depth: 0 fetch-depth: 0
@@ -1121,46 +1155,19 @@ jobs:
run: | run: |
if [[ "$GITHUB_REF_NAME" == "main" ]]; then if [[ "$GITHUB_REF_NAME" == "main" ]]; then
gh workflow --repo neondatabase/aws run deploy-dev.yml --ref main -f branch=main -f dockerTag=${{needs.tag.outputs.build-tag}} -f deployPreprodRegion=false gh workflow --repo neondatabase/aws run deploy-dev.yml --ref main -f branch=main -f dockerTag=${{needs.tag.outputs.build-tag}} -f deployPreprodRegion=false
# TODO: move deployPreprodRegion to release (`"$GITHUB_REF_NAME" == "release"` block), once Staging support different compute tag prefixes for different regions
gh workflow --repo neondatabase/aws run deploy-dev.yml --ref main -f branch=main -f dockerTag=${{needs.tag.outputs.build-tag}} -f deployPreprodRegion=true
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
gh workflow --repo neondatabase/aws run deploy-dev.yml --ref main \ gh workflow --repo neondatabase/aws run deploy-prod.yml --ref main -f branch=main -f dockerTag=${{needs.tag.outputs.build-tag}}
-f deployPgSniRouter=false \
-f deployProxy=false \
-f deployStorage=true \
-f deployStorageBroker=true \
-f branch=main \
-f dockerTag=${{needs.tag.outputs.build-tag}} \
-f deployPreprodRegion=true
gh workflow --repo neondatabase/aws run deploy-prod.yml --ref main \
-f deployPgSniRouter=false \
-f deployProxy=false \
-f deployStorage=true \
-f deployStorageBroker=true \
-f branch=main \
-f dockerTag=${{needs.tag.outputs.build-tag}}
elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
gh workflow --repo neondatabase/aws run deploy-dev.yml --ref main \
-f deployPgSniRouter=true \
-f deployProxy=true \
-f deployStorage=false \
-f deployStorageBroker=false \
-f branch=main \
-f dockerTag=${{needs.tag.outputs.build-tag}} \
-f deployPreprodRegion=true
gh workflow --repo neondatabase/aws run deploy-proxy-prod.yml --ref main \
-f deployPgSniRouter=true \
-f deployProxy=true \
-f branch=main \
-f dockerTag=${{needs.tag.outputs.build-tag}}
else else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'" echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
exit 1 exit 1
fi fi
- name: Create git tag - name: Create git tag
if: github.ref_name == 'release' || github.ref_name == 'release-proxy' if: github.ref_name == 'release'
uses: actions/github-script@v7 uses: actions/github-script@v6
with: with:
# Retry script for 5XX server errors: https://github.com/actions/github-script#retries # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
retries: 5 retries: 5
@@ -1172,10 +1179,9 @@ jobs:
sha: context.sha, sha: context.sha,
}) })
# TODO: check how GitHub releases looks for proxy releases and enable it if it's ok
- name: Create GitHub release - name: Create GitHub release
if: github.ref_name == 'release' if: github.ref_name == 'release'
uses: actions/github-script@v7 uses: actions/github-script@v6
with: with:
# Retry script for 5XX server errors: https://github.com/actions/github-script#retries # Retry script for 5XX server errors: https://github.com/actions/github-script#retries
retries: 5 retries: 5
@@ -1224,11 +1230,3 @@ jobs:
time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} s3://${BUCKET}/${PREFIX}/${FILENAME} time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} s3://${BUCKET}/${PREFIX}/${FILENAME}
done done
pin-build-tools-image:
needs: [ build-build-tools-image, promote-images, regress-tests ]
if: github.ref_name == 'main'
uses: ./.github/workflows/pin-build-tools-image.yml
with:
from-tag: ${{ needs.build-build-tools-image.outputs.image-tag }}
secrets: inherit
@@ -1,58 +0,0 @@
name: Check build-tools image
on:
workflow_call:
outputs:
image-tag:
description: "build-tools image tag"
value: ${{ jobs.check-image.outputs.tag }}
found:
description: "Whether the image is found in the registry"
value: ${{ jobs.check-image.outputs.found }}
defaults:
run:
shell: bash -euo pipefail {0}
# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
permissions: {}
jobs:
check-image:
runs-on: ubuntu-latest
outputs:
tag: ${{ steps.get-build-tools-tag.outputs.image-tag }}
found: ${{ steps.check-image.outputs.found }}
steps:
- name: Get build-tools image tag for the current commit
id: get-build-tools-tag
env:
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
LAST_BUILD_TOOLS_SHA=$(
gh api \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
--method GET \
--field path=Dockerfile.build-tools \
--field sha=${COMMIT_SHA} \
--field per_page=1 \
--jq ".[0].sha" \
"/repos/${GITHUB_REPOSITORY}/commits"
)
echo "image-tag=${LAST_BUILD_TOOLS_SHA}" | tee -a $GITHUB_OUTPUT
- name: Check if such tag found in the registry
id: check-image
env:
IMAGE_TAG: ${{ steps.get-build-tools-tag.outputs.image-tag }}
run: |
if docker manifest inspect neondatabase/build-tools:${IMAGE_TAG}; then
found=true
else
found=false
fi
echo "found=${found}" | tee -a $GITHUB_OUTPUT
@@ -1,36 +0,0 @@
name: Check Permissions
on:
workflow_call:
inputs:
github-event-name:
required: true
type: string
defaults:
run:
shell: bash -euo pipefail {0}
# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
permissions: {}
jobs:
check-permissions:
runs-on: ubuntu-latest
steps:
- name: Disallow CI runs on PRs from forks
if: |
inputs.github-event-name == 'pull_request' &&
github.event.pull_request.head.repo.full_name != github.repository
run: |
if [ "${{ contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.pull_request.author_association) }}" = "true" ]; then
MESSAGE="Please create a PR from a branch of ${GITHUB_REPOSITORY} instead of a fork"
else
MESSAGE="The PR should be reviewed and labelled with 'approved-for-ci-run' to trigger a CI run"
fi
# TODO: use actions/github-script to post this message as a PR comment
echo >&2 "We don't run CI for PRs from forks"
echo >&2 "${MESSAGE}"
exit 1
@@ -1,32 +0,0 @@
# A workflow from
# https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
name: cleanup caches by a branch
on:
pull_request:
types:
- closed
jobs:
cleanup:
runs-on: ubuntu-latest
steps:
- name: Cleanup
run: |
gh extension install actions/gh-actions-cache
echo "Fetching list of cache key"
cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH -L 100 | cut -f 1 )
## Setting this to not fail the workflow while deleting cache keys.
set +e
echo "Deleting caches..."
for cacheKey in $cacheKeysForPR
do
gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm
done
echo "Done"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
REPO: ${{ github.repository }}
BRANCH: refs/pull/${{ github.event.pull_request.number }}/merge
@@ -20,31 +20,13 @@ env:
COPT: '-Werror' COPT: '-Werror'
jobs: jobs:
check-permissions:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'run-no-ci') }}
uses: ./.github/workflows/check-permissions.yml
with:
github-event-name: ${{ github.event_name}}
check-build-tools-image:
needs: [ check-permissions ]
uses: ./.github/workflows/check-build-tools-image.yml
build-build-tools-image:
needs: [ check-build-tools-image ]
uses: ./.github/workflows/build-build-tools-image.yml
with:
image-tag: ${{ needs.check-build-tools-image.outputs.image-tag }}
secrets: inherit
check-macos-build: check-macos-build:
needs: [ check-permissions ]
if: | if: |
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') || contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') || contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
github.ref_name == 'main' github.ref_name == 'main'
timeout-minutes: 90 timeout-minutes: 90
runs-on: macos-14 runs-on: macos-latest
env: env:
# Use release build only, to have less debug info around # Use release build only, to have less debug info around
@@ -75,24 +57,24 @@ jobs:
- name: Cache postgres v14 build - name: Cache postgres v14 build
id: cache_pg_14 id: cache_pg_14
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: pg_install/v14 path: pg_install/v14
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} key: v1-${{ runner.os }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v15 build - name: Cache postgres v15 build
id: cache_pg_15 id: cache_pg_15
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: pg_install/v15 path: pg_install/v15
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} key: v1-${{ runner.os }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v16 build - name: Cache postgres v16 build
id: cache_pg_16 id: cache_pg_16
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: pg_install/v16 path: pg_install/v16
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} key: v1-${{ runner.os }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Set extra env for macOS - name: Set extra env for macOS
run: | run: |
@@ -100,14 +82,14 @@ jobs:
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
- name: Cache cargo deps - name: Cache cargo deps
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: | path: |
~/.cargo/registry ~/.cargo/registry
!~/.cargo/registry/src !~/.cargo/registry/src
~/.cargo/git ~/.cargo/git
target target
key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust key: v1-${{ runner.os }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
- name: Build postgres v14 - name: Build postgres v14
if: steps.cache_pg_14.outputs.cache-hit != 'true' if: steps.cache_pg_14.outputs.cache-hit != 'true'
@@ -128,13 +110,12 @@ jobs:
run: make walproposer-lib -j$(sysctl -n hw.ncpu) run: make walproposer-lib -j$(sysctl -n hw.ncpu)
- name: Run cargo build - name: Run cargo build
run: PQ_LIB_DIR=$(pwd)/pg_install/v16/lib cargo build --all --release run: cargo build --all --release
- name: Check that no warnings are produced - name: Check that no warnings are produced
run: ./run_clippy.sh run: ./run_clippy.sh
check-linux-arm-build: check-linux-arm-build:
needs: [ check-permissions, build-build-tools-image ]
timeout-minutes: 90 timeout-minutes: 90
runs-on: [ self-hosted, dev, arm64 ] runs-on: [ self-hosted, dev, arm64 ]
@@ -148,10 +129,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init options: --init
steps: steps:
@@ -193,21 +171,21 @@ jobs:
- name: Cache postgres v14 build - name: Cache postgres v14 build
id: cache_pg_14 id: cache_pg_14
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: pg_install/v14 path: pg_install/v14
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v15 build - name: Cache postgres v15 build
id: cache_pg_15 id: cache_pg_15
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: pg_install/v15 path: pg_install/v15
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v16 build - name: Cache postgres v16 build
id: cache_pg_16 id: cache_pg_16
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: pg_install/v16 path: pg_install/v16
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
@@ -258,15 +236,11 @@ jobs:
cargo nextest run --package remote_storage --test test_real_azure cargo nextest run --package remote_storage --test test_real_azure
check-codestyle-rust-arm: check-codestyle-rust-arm:
needs: [ check-permissions, build-build-tools-image ]
timeout-minutes: 90 timeout-minutes: 90
runs-on: [ self-hosted, dev, arm64 ] runs-on: [ self-hosted, dev, arm64 ]
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init options: --init
steps: steps:
@@ -333,17 +307,13 @@ jobs:
run: cargo deny check run: cargo deny check
gather-rust-build-stats: gather-rust-build-stats:
needs: [ check-permissions, build-build-tools-image ]
if: | if: |
contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') || contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') || contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
github.ref_name == 'main' github.ref_name == 'main'
runs-on: [ self-hosted, gen3, large ] runs-on: [ self-hosted, gen3, large ]
container: container:
image: ${{ needs.build-build-tools-image.outputs.image }} image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init options: --init
env: env:
@@ -384,7 +354,7 @@ jobs:
echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
- name: Publish build stats report - name: Publish build stats report
uses: actions/github-script@v7 uses: actions/github-script@v6
env: env:
REPORT_URL: ${{ steps.upload-stats.outputs.report-url }} REPORT_URL: ${{ steps.upload-stats.outputs.report-url }}
SHA: ${{ github.event.pull_request.head.sha || github.sha }} SHA: ${{ github.event.pull_request.head.sha || github.sha }}
@@ -28,7 +28,7 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
@@ -38,10 +38,11 @@ jobs:
uses: snok/install-poetry@v1 uses: snok/install-poetry@v1
- name: Cache poetry deps - name: Cache poetry deps
uses: actions/cache@v4 id: cache_poetry
uses: actions/cache@v3
with: with:
path: ~/.cache/pypoetry/virtualenvs path: ~/.cache/pypoetry/virtualenvs
key: v2-${{ runner.os }}-python-deps-ubunutu-latest-${{ hashFiles('poetry.lock') }} key: v1-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }}
- name: Install Python deps - name: Install Python deps
shell: bash -euxo pipefail {0} shell: bash -euxo pipefail {0}
@@ -82,7 +83,7 @@ jobs:
# It will be fixed after switching to gen2 runner # It will be fixed after switching to gen2 runner
- name: Upload python test logs - name: Upload python test logs
if: always() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
retention-days: 7 retention-days: 7
name: python-test-pg_clients-${{ runner.os }}-stage-logs name: python-test-pg_clients-${{ runner.os }}-stage-logs
@@ -1,72 +0,0 @@
name: 'Pin build-tools image'
on:
workflow_dispatch:
inputs:
from-tag:
description: 'Source tag'
required: true
type: string
workflow_call:
inputs:
from-tag:
description: 'Source tag'
required: true
type: string
defaults:
run:
shell: bash -euo pipefail {0}
concurrency:
group: pin-build-tools-image-${{ inputs.from-tag }}
permissions: {}
jobs:
tag-image:
runs-on: ubuntu-latest
env:
FROM_TAG: ${{ inputs.from-tag }}
TO_TAG: pinned
steps:
- name: Check if we really need to pin the image
id: check-manifests
run: |
docker manifest inspect neondatabase/build-tools:${FROM_TAG} > ${FROM_TAG}.json
docker manifest inspect neondatabase/build-tools:${TO_TAG} > ${TO_TAG}.json
if diff ${FROM_TAG}.json ${TO_TAG}.json; then
skip=true
else
skip=false
fi
echo "skip=${skip}" | tee -a $GITHUB_OUTPUT
- uses: docker/login-action@v3
if: steps.check-manifests.outputs.skip == 'false'
with:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
- name: Tag build-tools with `${{ env.TO_TAG }}` in Docker Hub
if: steps.check-manifests.outputs.skip == 'false'
run: |
docker buildx imagetools create -t neondatabase/build-tools:${TO_TAG} \
neondatabase/build-tools:${FROM_TAG}
- uses: docker/login-action@v3
if: steps.check-manifests.outputs.skip == 'false'
with:
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
- name: Tag build-tools with `${{ env.TO_TAG }}` in ECR
if: steps.check-manifests.outputs.skip == 'false'
run: |
docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:${TO_TAG} \
neondatabase/build-tools:${FROM_TAG}
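Because the workflow also accepts `workflow_dispatch`, it can be started by hand; a typical invocation with the GitHub CLI would look roughly like this (the tag value is a placeholder):

```bash
gh workflow -R neondatabase/neon run pin-build-tools-image.yml \
  -f from-tag=<build-tools image tag>
```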
@@ -2,31 +2,12 @@ name: Create Release Branch
on: on:
schedule: schedule:
# It should be kept in sync with if-condition in jobs - cron: '0 6 * * 1'
- cron: '0 6 * * MON' # Storage release
- cron: '0 6 * * THU' # Proxy release
workflow_dispatch: workflow_dispatch:
inputs:
create-storage-release-branch:
type: boolean
description: 'Create Storage release PR'
required: false
create-proxy-release-branch:
type: boolean
description: 'Create Proxy release PR'
required: false
# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
permissions: {}
defaults:
run:
shell: bash -euo pipefail {0}
jobs: jobs:
create-storage-release-branch: create_release_branch:
if: ${{ github.event.schedule == '0 6 * * MON' || format('{0}', inputs.create-storage-release-branch) == 'true' }} runs-on: [ ubuntu-latest ]
runs-on: ubuntu-latest
permissions: permissions:
contents: write # for `git push` contents: write # for `git push`
@@ -37,67 +18,27 @@ jobs:
with: with:
ref: main ref: main
- name: Set environment variables - name: Get current date
run: | id: date
echo "RELEASE_DATE=$(date +'%Y-%m-%d')" | tee -a $GITHUB_ENV run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
echo "RELEASE_BRANCH=rc/$(date +'%Y-%m-%d')" | tee -a $GITHUB_ENV
- name: Create release branch - name: Create release branch
run: git checkout -b $RELEASE_BRANCH run: git checkout -b releases/${{ steps.date.outputs.date }}
- name: Push new branch - name: Push new branch
run: git push origin $RELEASE_BRANCH run: git push origin releases/${{ steps.date.outputs.date }}
- name: Create pull request into release - name: Create pull request into release
env: env:
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }} GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
run: | run: |
cat << EOF > body.md cat << EOF > body.md
## Release ${RELEASE_DATE} ## Release ${{ steps.date.outputs.date }}
**Please merge this Pull Request using 'Create a merge commit' button** **Please merge this PR using 'Create a merge commit'!**
EOF EOF
gh pr create --title "Release ${RELEASE_DATE}" \ gh pr create --title "Release ${{ steps.date.outputs.date }}" \
--body-file "body.md" \ --body-file "body.md" \
--head "${RELEASE_BRANCH}" \ --head "releases/${{ steps.date.outputs.date }}" \
--base "release" --base "release"
create-proxy-release-branch:
if: ${{ github.event.schedule == '0 6 * * THU' || format('{0}', inputs.create-proxy-release-branch) == 'true' }}
runs-on: ubuntu-latest
permissions:
contents: write # for `git push`
steps:
- name: Check out code
uses: actions/checkout@v4
with:
ref: main
- name: Set environment variables
run: |
echo "RELEASE_DATE=$(date +'%Y-%m-%d')" | tee -a $GITHUB_ENV
echo "RELEASE_BRANCH=rc/proxy/$(date +'%Y-%m-%d')" | tee -a $GITHUB_ENV
- name: Create release branch
run: git checkout -b $RELEASE_BRANCH
- name: Push new branch
run: git push origin $RELEASE_BRANCH
- name: Create pull request into release
env:
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
run: |
cat << EOF > body.md
## Proxy release ${RELEASE_DATE}
**Please merge this Pull Request using 'Create a merge commit' button**
EOF
gh pr create --title "Proxy release ${RELEASE_DATE}" \
--body-file "body.md" \
--head "${RELEASE_BRANCH}" \
--base "release-proxy"
@@ -1,119 +0,0 @@
name: Trigger E2E Tests
on:
pull_request:
types:
- ready_for_review
workflow_call:
defaults:
run:
shell: bash -euxo pipefail {0}
env:
# A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
jobs:
cancel-previous-e2e-tests:
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Cancel previous e2e-tests runs for this PR
env:
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
run: |
gh workflow --repo neondatabase/cloud \
run cancel-previous-in-concurrency-group.yml \
--field concurrency_group="${{ env.E2E_CONCURRENCY_GROUP }}"
tag:
runs-on: [ ubuntu-latest ]
outputs:
build-tag: ${{ steps.build-tag.outputs.tag }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get build tag
env:
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
CURRENT_BRANCH: ${{ github.head_ref || github.ref_name }}
CURRENT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
run: |
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
echo "tag=$(git rev-list --count HEAD)" | tee -a $GITHUB_OUTPUT
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
echo "tag=release-$(git rev-list --count HEAD)" | tee -a $GITHUB_OUTPUT
elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
echo "tag=release-proxy-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
BUILD_AND_TEST_RUN_ID=$(gh run list -b $CURRENT_BRANCH -c $CURRENT_SHA -w 'Build and Test' -L 1 --json databaseId --jq '.[].databaseId')
echo "tag=$BUILD_AND_TEST_RUN_ID" | tee -a $GITHUB_OUTPUT
fi
id: build-tag
trigger-e2e-tests:
needs: [ tag ]
runs-on: [ self-hosted, gen3, small ]
env:
TAG: ${{ needs.tag.outputs.build-tag }}
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
options: --init
steps:
- name: check if ECR images are present
run: |
for REPO in neon compute-tools compute-node-v14 vm-compute-node-v14 compute-node-v15 vm-compute-node-v15 compute-node-v16 vm-compute-node-v16; do
OUTPUT=$(aws ecr describe-images --repository-name ${REPO} --region eu-central-1 --query "imageDetails[?imageTags[?contains(@, '${TAG}')]]" --output text)
if [ "$OUTPUT" == "" ]; then
echo "$REPO with image tag $TAG not found" >> $GITHUB_OUTPUT
exit 1
fi
done
- name: Set PR's status to pending and request a remote CI test
run: |
# For pull requests, GH Actions set "github.sha" variable to point at a fake merge commit
# but we need to use a real sha of a latest commit in the PR's branch for the e2e job,
# to place a job run status update later.
COMMIT_SHA=${{ github.event.pull_request.head.sha }}
# For non-PR kinds of runs, the above will produce an empty variable, pick the original sha value for those
COMMIT_SHA=${COMMIT_SHA:-${{ github.sha }}}
REMOTE_REPO="${{ github.repository_owner }}/cloud"
curl -f -X POST \
https://api.github.com/repos/${{ github.repository }}/statuses/$COMMIT_SHA \
-H "Accept: application/vnd.github.v3+json" \
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
--data \
"{
\"state\": \"pending\",
\"context\": \"neon-cloud-e2e\",
\"description\": \"[$REMOTE_REPO] Remote CI job is about to start\"
}"
curl -f -X POST \
https://api.github.com/repos/$REMOTE_REPO/actions/workflows/testing.yml/dispatches \
-H "Accept: application/vnd.github.v3+json" \
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
--data \
"{
\"ref\": \"main\",
\"inputs\": {
\"ci_job_name\": \"neon-cloud-e2e\",
\"commit_hash\": \"$COMMIT_SHA\",
\"remote_repo\": \"${{ github.repository }}\",
\"storage_image_tag\": \"${TAG}\",
\"compute_image_tag\": \"${TAG}\",
\"concurrency_group\": \"${{ env.E2E_CONCURRENCY_GROUP }}\"
}
}"
@@ -0,0 +1,70 @@
name: 'Update build tools image tag'
# This workflow is used to update the tag of the build-tools image in ECR.
# The most common use case is adding/moving the `pinned` tag to the `${GITHUB_RUN_ID}` image.
on:
workflow_dispatch:
inputs:
from-tag:
description: 'Source tag'
required: true
type: string
to-tag:
description: 'Destination tag'
required: true
type: string
default: 'pinned'
defaults:
run:
shell: bash -euo pipefail {0}
permissions: {}
jobs:
tag-image:
runs-on: [ self-hosted, gen3, small ]
env:
ECR_IMAGE: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools
DOCKER_HUB_IMAGE: docker.io/neondatabase/build-tools
FROM_TAG: ${{ inputs.from-tag }}
TO_TAG: ${{ inputs.to-tag }}
steps:
# Use custom DOCKER_CONFIG directory to avoid conflicts with default settings
# The default value is ~/.docker
- name: Set custom docker config directory
run: |
mkdir -p .docker-custom
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV
- uses: docker/login-action@v2
with:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
- uses: docker/login-action@v2
with:
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
- uses: actions/setup-go@v5
with:
go-version: '1.21'
- name: Install crane
run: |
go install github.com/google/go-containerregistry/cmd/crane@a0658aa1d0cc7a7f1bcc4a3af9155335b6943f40 # v0.18.0
- name: Copy images
run: |
crane copy "${ECR_IMAGE}:${FROM_TAG}" "${ECR_IMAGE}:${TO_TAG}"
crane copy "${ECR_IMAGE}:${FROM_TAG}" "${DOCKER_HUB_IMAGE}:${TO_TAG}"
- name: Remove custom docker config directory
if: always()
run: |
rm -rf .docker-custom
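Manual invocation via the GitHub CLI would look roughly like this (tag values are placeholders; `to-tag` defaults to `pinned`):

```bash
gh workflow -R neondatabase/neon run update_build_tools_image.yml \
  -f from-tag=<source image tag> \
  -f to-tag=pinned
```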
.gitignore
@@ -9,7 +9,6 @@ test_output/
neon.iml neon.iml
/.neon /.neon
/integration_tests/.neon /integration_tests/.neon
compaction-suite-results.*
# Coverage # Coverage
*.profraw *.profraw
@@ -1,13 +1,12 @@
/compute_tools/ @neondatabase/control-plane @neondatabase/compute /compute_tools/ @neondatabase/control-plane @neondatabase/compute
/control_plane/attachment_service @neondatabase/storage /control_plane/ @neondatabase/compute @neondatabase/storage
/libs/pageserver_api/ @neondatabase/storage /libs/pageserver_api/ @neondatabase/compute @neondatabase/storage
/libs/postgres_ffi/ @neondatabase/compute @neondatabase/safekeepers /libs/postgres_ffi/ @neondatabase/compute
/libs/remote_storage/ @neondatabase/storage /libs/remote_storage/ @neondatabase/storage
/libs/safekeeper_api/ @neondatabase/safekeepers /libs/safekeeper_api/ @neondatabase/safekeepers
/libs/vm_monitor/ @neondatabase/autoscaling /libs/vm_monitor/ @neondatabase/autoscaling @neondatabase/compute
/pageserver/ @neondatabase/storage /pageserver/ @neondatabase/storage
/pgxn/ @neondatabase/compute /pgxn/ @neondatabase/compute
/pgxn/neon/ @neondatabase/compute @neondatabase/safekeepers
/proxy/ @neondatabase/proxy /proxy/ @neondatabase/proxy
/safekeeper/ @neondatabase/safekeepers /safekeeper/ @neondatabase/safekeepers
/vendor/ @neondatabase/compute /vendor/ @neondatabase/compute
@@ -20,7 +20,7 @@ ln -s ../../pre-commit.py .git/hooks/pre-commit
This will run following checks on staged files before each commit: This will run following checks on staged files before each commit:
- `rustfmt` - `rustfmt`
- checks for Python files, see [obligatory checks](/docs/sourcetree.md#obligatory-checks). - checks for python files, see [obligatory checks](/docs/sourcetree.md#obligatory-checks).
There is also a separate script `./run_clippy.sh` that runs `cargo clippy` on the whole project There is also a separate script `./run_clippy.sh` that runs `cargo clippy` on the whole project
and `./scripts/reformat` that runs all formatting tools to ensure the project is up to date. and `./scripts/reformat` that runs all formatting tools to ensure the project is up to date.
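For example, both helpers can be run from the repository root before pushing:

```bash
./run_clippy.sh      # cargo clippy over the whole project
./scripts/reformat   # run all formatting tools
```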
@@ -54,9 +54,6 @@ _An instruction for maintainers_
- If and only if it looks **safe** (i.e. it doesn't contain any malicious code which could expose secrets or harm the CI), then: - If and only if it looks **safe** (i.e. it doesn't contain any malicious code which could expose secrets or harm the CI), then:
- Press the "Approve and run" button in GitHub UI - Press the "Approve and run" button in GitHub UI
- Add the `approved-for-ci-run` label to the PR - Add the `approved-for-ci-run` label to the PR
- Currently draft PR will skip e2e test (only for internal contributors). After turning the PR 'Ready to Review' CI will trigger e2e test
- Add `run-e2e-tests-in-draft` label to run e2e test in draft PR (override above behaviour)
- The `approved-for-ci-run` workflow will add `run-e2e-tests-in-draft` automatically to run e2e test for external contributors
Repeat all steps after any change to the PR. Repeat all steps after any change to the PR.
- When the changes are ready to get merged — merge the original PR (not the internal one) - When the changes are ready to get merged — merge the original PR (not the internal one)
@@ -74,11 +71,16 @@ We're using the following approach to make it work:
For details see [`approved-for-ci-run.yml`](.github/workflows/approved-for-ci-run.yml) For details see [`approved-for-ci-run.yml`](.github/workflows/approved-for-ci-run.yml)
## How do I make build-tools image "pinned" ## How do I add the "pinned" tag to an buildtools image?
We use the `pinned` tag for `Dockerfile.buildtools` build images in our CI/CD setup, currently adding the `pinned` tag is a manual operation.
It's possible to update the `pinned` tag of the `build-tools` image using the `pin-build-tools-image.yml` workflow. You can call it from GitHub UI: https://github.com/neondatabase/neon/actions/workflows/update_build_tools_image.yml,
or using GitHub CLI:
```bash ```bash
gh workflow -R neondatabase/neon run pin-build-tools-image.yml \ gh workflow -R neondatabase/neon run update_build_tools_image.yml \
-f from-tag=cc98d9b00d670f182c507ae3783342bd7e64c31e -f from-tag=6254913013 \
``` -f to-tag=pinned \
# Default `-f to-tag` is `pinned`, so the parameter can be omitted.
```
Cargo.lock: file diff suppressed because it is too large
@@ -5,7 +5,6 @@ members = [
"control_plane", "control_plane",
"control_plane/attachment_service", "control_plane/attachment_service",
"pageserver", "pageserver",
"pageserver/compaction",
"pageserver/ctl", "pageserver/ctl",
"pageserver/client", "pageserver/client",
"pageserver/pagebench", "pageserver/pagebench",
@@ -19,7 +18,6 @@ members = [
"libs/pageserver_api", "libs/pageserver_api",
"libs/postgres_ffi", "libs/postgres_ffi",
"libs/safekeeper_api", "libs/safekeeper_api",
"libs/desim",
"libs/utils", "libs/utils",
"libs/consumption_metrics", "libs/consumption_metrics",
"libs/postgres_backend", "libs/postgres_backend",
@@ -50,14 +48,11 @@ azure_storage_blobs = "0.18"
flate2 = "1.0.26" flate2 = "1.0.26"
async-stream = "0.3" async-stream = "0.3"
async-trait = "0.1" async-trait = "0.1"
aws-config = { version = "1.1.4", default-features = false, features=["rustls"] } aws-config = { version = "1.0", default-features = false, features=["rustls"] }
aws-sdk-s3 = "1.14" aws-sdk-s3 = "1.0"
aws-sdk-iam = "1.15.0" aws-smithy-async = { version = "1.0", default-features = false, features=["rt-tokio"] }
aws-smithy-async = { version = "1.1.4", default-features = false, features=["rt-tokio"] } aws-smithy-types = "1.0"
aws-smithy-types = "1.1.4" aws-credential-types = "1.0"
aws-credential-types = "1.1.4"
aws-sigv4 = { version = "1.2.0", features = ["sign-http"] }
aws-types = "1.1.7"
axum = { version = "0.6.20", features = ["ws"] } axum = { version = "0.6.20", features = ["ws"] }
base64 = "0.13.0" base64 = "0.13.0"
bincode = "1.3" bincode = "1.3"
@@ -69,6 +64,7 @@ camino = "1.1.6"
cfg-if = "1.0.0" cfg-if = "1.0.0"
chrono = { version = "0.4", default-features = false, features = ["clock"] } chrono = { version = "0.4", default-features = false, features = ["clock"] }
clap = { version = "4.0", features = ["derive"] } clap = { version = "4.0", features = ["derive"] }
close_fds = "0.3.2"
comfy-table = "6.1" comfy-table = "6.1"
const_format = "0.2" const_format = "0.2"
crc32c = "0.6" crc32c = "0.6"
@@ -78,20 +74,18 @@ either = "1.8"
enum-map = "2.4.2" enum-map = "2.4.2"
enumset = "1.0.12" enumset = "1.0.12"
fail = "0.5.0" fail = "0.5.0"
fallible-iterator = "0.2"
fs2 = "0.4.3" fs2 = "0.4.3"
futures = "0.3" futures = "0.3"
futures-core = "0.3" futures-core = "0.3"
futures-util = "0.3" futures-util = "0.3"
git-version = "0.3" git-version = "0.3"
hashbrown = "0.13" hashbrown = "0.13"
hashlink = "0.8.4" hashlink = "0.8.1"
hdrhistogram = "7.5.2" hdrhistogram = "7.5.2"
hex = "0.4" hex = "0.4"
hex-literal = "0.4" hex-literal = "0.4"
hmac = "0.12.1" hmac = "0.12.1"
hostname = "0.3.1" hostname = "0.3.1"
http = {version = "1.1.0", features = ["std"]}
http-types = { version = "2", default-features = false } http-types = { version = "2", default-features = false }
humantime = "2.1" humantime = "2.1"
humantime-serde = "1.1.1" humantime-serde = "1.1.1"
@@ -101,11 +95,8 @@ inotify = "0.10.2"
ipnet = "2.9.0" ipnet = "2.9.0"
itertools = "0.10" itertools = "0.10"
jsonwebtoken = "9" jsonwebtoken = "9"
lasso = "0.7"
leaky-bucket = "1.0.1"
libc = "0.2" libc = "0.2"
md5 = "0.7.0" md5 = "0.7.0"
measured = { version = "0.0.13", features=["default", "lasso"] }
memoffset = "0.8" memoffset = "0.8"
native-tls = "0.2" native-tls = "0.2"
nix = { version = "0.27", features = ["fs", "process", "socket", "signal", "poll"] } nix = { version = "0.27", features = ["fs", "process", "socket", "signal", "poll"] }
@@ -121,11 +112,10 @@ parquet = { version = "49.0.0", default-features = false, features = ["zstd"] }
parquet_derive = "49.0.0" parquet_derive = "49.0.0"
pbkdf2 = { version = "0.12.1", features = ["simple", "std"] } pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
pin-project-lite = "0.2" pin-project-lite = "0.2"
procfs = "0.14"
prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency
prost = "0.11" prost = "0.11"
rand = "0.8" rand = "0.8"
redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] } redis = { version = "0.24.0", features = ["tokio-rustls-comp", "keep-alive"] }
regex = "1.10.2" regex = "1.10.2"
reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] } reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
reqwest-tracing = { version = "0.4.7", features = ["opentelemetry_0_20"] } reqwest-tracing = { version = "0.4.7", features = ["opentelemetry_0_20"] }
@@ -134,8 +124,8 @@ reqwest-retry = "0.2.2"
routerify = "3" routerify = "3"
rpds = "0.13" rpds = "0.13"
rustc-hash = "1.1.0" rustc-hash = "1.1.0"
rustls = "0.22" rustls = "0.21"
rustls-pemfile = "2" rustls-pemfile = "1"
rustls-split = "0.3" rustls-split = "0.3"
scopeguard = "1.1" scopeguard = "1.1"
sysinfo = "0.29.2" sysinfo = "0.29.2"
@@ -153,20 +143,18 @@ smol_str = { version = "0.2.0", features = ["serde"] }
socket2 = "0.5" socket2 = "0.5"
strum = "0.24" strum = "0.24"
strum_macros = "0.24" strum_macros = "0.24"
"subtle" = "2.5.0"
svg_fmt = "0.4.1" svg_fmt = "0.4.1"
sync_wrapper = "0.1.2" sync_wrapper = "0.1.2"
tar = "0.4" tar = "0.4"
task-local-extensions = "0.1.4" task-local-extensions = "0.1.4"
test-context = "0.1" test-context = "0.1"
thiserror = "1.0" thiserror = "1.0"
tikv-jemallocator = "0.5" tls-listener = { version = "0.7", features = ["rustls", "hyper-h1"] }
tikv-jemalloc-ctl = "0.5"
tokio = { version = "1.17", features = ["macros"] } tokio = { version = "1.17", features = ["macros"] }
tokio-epoll-uring = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" } tokio-epoll-uring = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" }
tokio-io-timeout = "1.2.0" tokio-io-timeout = "1.2.0"
tokio-postgres-rustls = "0.11.0" tokio-postgres-rustls = "0.10.0"
tokio-rustls = "0.25" tokio-rustls = "0.24"
tokio-stream = "0.1" tokio-stream = "0.1"
tokio-tar = "0.3" tokio-tar = "0.3"
tokio-util = { version = "0.7.10", features = ["io", "rt"] } tokio-util = { version = "0.7.10", features = ["io", "rt"] }
@@ -177,9 +165,7 @@ tracing = "0.1"
tracing-error = "0.2.0" tracing-error = "0.2.0"
tracing-opentelemetry = "0.20.0" tracing-opentelemetry = "0.20.0"
tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] } tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
twox-hash = { version = "1.6.3", default-features = false }
url = "2.2" url = "2.2"
urlencoding = "2.1"
uuid = { version = "1.6.1", features = ["v4", "v7", "serde"] } uuid = { version = "1.6.1", features = ["v4", "v7", "serde"] }
walkdir = "2.3.2" walkdir = "2.3.2"
webpki-roots = "0.25" webpki-roots = "0.25"
@@ -205,14 +191,12 @@ consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
metrics = { version = "0.1", path = "./libs/metrics/" } metrics = { version = "0.1", path = "./libs/metrics/" }
pageserver_api = { version = "0.1", path = "./libs/pageserver_api/" } pageserver_api = { version = "0.1", path = "./libs/pageserver_api/" }
pageserver_client = { path = "./pageserver/client" } pageserver_client = { path = "./pageserver/client" }
pageserver_compaction = { version = "0.1", path = "./pageserver/compaction/" }
postgres_backend = { version = "0.1", path = "./libs/postgres_backend/" } postgres_backend = { version = "0.1", path = "./libs/postgres_backend/" }
postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" } postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" }
postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" } postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" }
pq_proto = { version = "0.1", path = "./libs/pq_proto/" } pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
remote_storage = { version = "0.1", path = "./libs/remote_storage/" } remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" } safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
desim = { version = "0.1", path = "./libs/desim" }
storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy. storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
tenant_size_model = { version = "0.1", path = "./libs/tenant_size_model/" } tenant_size_model = { version = "0.1", path = "./libs/tenant_size_model/" }
tracing-utils = { version = "0.1", path = "./libs/tracing-utils/" } tracing-utils = { version = "0.1", path = "./libs/tracing-utils/" }
@@ -225,7 +209,7 @@ workspace_hack = { version = "0.1", path = "./workspace_hack/" }
## Build dependencies ## Build dependencies
criterion = "0.5.1" criterion = "0.5.1"
rcgen = "0.12" rcgen = "0.11"
rstest = "0.18" rstest = "0.18"
camino-tempfile = "1.0.2" camino-tempfile = "1.0.2"
tonic-build = "0.9" tonic-build = "0.9"
@@ -47,13 +47,12 @@ COPY --chown=nonroot . .
# Show build caching stats to check if it was used in the end. # Show build caching stats to check if it was used in the end.
# Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats. # Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
RUN set -e \ RUN set -e \
&& RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment" cargo build \ && mold -run cargo build \
--bin pg_sni_router \ --bin pg_sni_router \
--bin pageserver \ --bin pageserver \
--bin pagectl \ --bin pagectl \
--bin safekeeper \ --bin safekeeper \
--bin storage_broker \ --bin storage_broker \
--bin storage_controller \
--bin proxy \ --bin proxy \
--bin neon_local \ --bin neon_local \
--locked --release \ --locked --release \
@@ -81,7 +80,6 @@ COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver
COPY --from=build --chown=neon:neon /home/nonroot/target/release/pagectl /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/pagectl /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_broker /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_broker /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_controller /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/neon_local /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/neon_local /usr/local/bin
@@ -100,11 +98,6 @@ RUN mkdir -p /data/.neon/ && chown -R neon:neon /data/.neon/ \
-c "listen_pg_addr='0.0.0.0:6400'" \ -c "listen_pg_addr='0.0.0.0:6400'" \
-c "listen_http_addr='0.0.0.0:9898'" -c "listen_http_addr='0.0.0.0:9898'"
# When running a binary that links with libpq, default to using our most recent postgres version. Binaries
# that want a particular postgres version will select it explicitly: this is just a default.
ENV LD_LIBRARY_PATH /usr/local/v16/lib
VOLUME ["/data"] VOLUME ["/data"]
USER neon USER neon
EXPOSE 6400 EXPOSE 6400
@@ -111,7 +111,7 @@ USER nonroot:nonroot
WORKDIR /home/nonroot WORKDIR /home/nonroot
# Python # Python
ENV PYTHON_VERSION=3.9.18 \ ENV PYTHON_VERSION=3.9.2 \
PYENV_ROOT=/home/nonroot/.pyenv \ PYENV_ROOT=/home/nonroot/.pyenv \
PATH=/home/nonroot/.pyenv/shims:/home/nonroot/.pyenv/bin:/home/nonroot/.poetry/bin:$PATH PATH=/home/nonroot/.pyenv/shims:/home/nonroot/.pyenv/bin:/home/nonroot/.poetry/bin:$PATH
RUN set -e \ RUN set -e \
@@ -135,7 +135,7 @@ WORKDIR /home/nonroot
# Rust # Rust
# Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`) # Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`)
ENV RUSTC_VERSION=1.77.0 ENV RUSTC_VERSION=1.75.0
ENV RUSTUP_HOME="/home/nonroot/.rustup" ENV RUSTUP_HOME="/home/nonroot/.rustup"
ENV PATH="/home/nonroot/.cargo/bin:${PATH}" ENV PATH="/home/nonroot/.cargo/bin:${PATH}"
RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && whoami && \ RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && whoami && \
@@ -149,7 +149,7 @@ RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux
cargo install --git https://github.com/paritytech/cachepot && \ cargo install --git https://github.com/paritytech/cachepot && \
cargo install rustfilt && \ cargo install rustfilt && \
cargo install cargo-hakari && \ cargo install cargo-hakari && \
cargo install cargo-deny --locked && \ cargo install cargo-deny && \
cargo install cargo-hack && \ cargo install cargo-hack && \
cargo install cargo-nextest && \ cargo install cargo-nextest && \
rm -rf /home/nonroot/.cargo/registry && \ rm -rf /home/nonroot/.cargo/registry && \
@@ -520,7 +520,8 @@ RUN apt-get update && \
libboost-regex1.74-dev \ libboost-regex1.74-dev \
libboost-serialization1.74-dev \ libboost-serialization1.74-dev \
libboost-system1.74-dev \ libboost-system1.74-dev \
libeigen3-dev libeigen3-dev \
libfreetype6-dev
ENV PATH "/usr/local/pgsql/bin/:/usr/local/pgsql/:$PATH" ENV PATH "/usr/local/pgsql/bin/:/usr/local/pgsql/:$PATH"
RUN wget https://github.com/rdkit/rdkit/archive/refs/tags/Release_2023_03_3.tar.gz -O rdkit.tar.gz && \ RUN wget https://github.com/rdkit/rdkit/archive/refs/tags/Release_2023_03_3.tar.gz -O rdkit.tar.gz && \
@@ -546,7 +547,6 @@ RUN wget https://github.com/rdkit/rdkit/archive/refs/tags/Release_2023_03_3.tar.
-D PostgreSQL_LIBRARY_DIR=`pg_config --libdir` \ -D PostgreSQL_LIBRARY_DIR=`pg_config --libdir` \
-D RDK_INSTALL_INTREE=OFF \ -D RDK_INSTALL_INTREE=OFF \
-D RDK_INSTALL_COMIC_FONTS=OFF \ -D RDK_INSTALL_COMIC_FONTS=OFF \
-D RDK_BUILD_FREETYPE_SUPPORT=OFF \
-D CMAKE_BUILD_TYPE=Release \ -D CMAKE_BUILD_TYPE=Release \
. && \ . && \
make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) && \
@@ -639,8 +639,8 @@ FROM build-deps AS pg-anon-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
ENV PATH "/usr/local/pgsql/bin/:$PATH" ENV PATH "/usr/local/pgsql/bin/:$PATH"
RUN wget https://github.com/neondatabase/postgresql_anonymizer/archive/refs/tags/neon_1.1.1.tar.gz -O pg_anon.tar.gz && \ RUN wget https://gitlab.com/dalibo/postgresql_anonymizer/-/archive/1.1.0/postgresql_anonymizer-1.1.0.tar.gz -O pg_anon.tar.gz && \
echo "321ea8d5c1648880aafde850a2c576e4a9e7b9933a34ce272efc839328999fa9 pg_anon.tar.gz" | sha256sum --check && \ echo "08b09d2ff9b962f96c60db7e6f8e79cf7253eb8772516998fc35ece08633d3ad pg_anon.tar.gz" | sha256sum --check && \
mkdir pg_anon-src && cd pg_anon-src && tar xvzf ../pg_anon.tar.gz --strip-components=1 -C . && \ mkdir pg_anon-src && cd pg_anon-src && tar xvzf ../pg_anon.tar.gz --strip-components=1 -C . && \
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\ find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
@@ -769,40 +769,6 @@ RUN wget https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_2_5.tar.
make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install make -j $(getconf _NPROCESSORS_ONLN) install
#########################################################################################
#
# Layer "pg_ivm"
# compile pg_ivm extension
#
#########################################################################################
FROM build-deps AS pg-ivm-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
ENV PATH "/usr/local/pgsql/bin/:$PATH"
RUN wget https://github.com/sraoss/pg_ivm/archive/refs/tags/v1.7.tar.gz -O pg_ivm.tar.gz && \
echo "ebfde04f99203c7be4b0e873f91104090e2e83e5429c32ac242d00f334224d5e pg_ivm.tar.gz" | sha256sum --check && \
mkdir pg_ivm-src && cd pg_ivm-src && tar xvzf ../pg_ivm.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_ivm.control
#########################################################################################
#
# Layer "pg_partman"
# compile pg_partman extension
#
#########################################################################################
FROM build-deps AS pg-partman-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
ENV PATH "/usr/local/pgsql/bin/:$PATH"
RUN wget https://github.com/pgpartman/pg_partman/archive/refs/tags/v5.0.1.tar.gz -O pg_partman.tar.gz && \
echo "75b541733a9659a6c90dbd40fccb904a630a32880a6e3044d0c4c5f4c8a65525 pg_partman.tar.gz" | sha256sum --check && \
mkdir pg_partman-src && cd pg_partman-src && tar xvzf ../pg_partman.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_partman.control
######################################################################################### #########################################################################################
# #
# Layer "neon-pg-ext-build" # Layer "neon-pg-ext-build"
@@ -843,9 +809,6 @@ COPY --from=pg-roaringbitmap-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-semver-pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY --from=pg-semver-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-embedding-pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY --from=pg-embedding-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=wal2json-pg-build /usr/local/pgsql /usr/local/pgsql COPY --from=wal2json-pg-build /usr/local/pgsql /usr/local/pgsql
COPY --from=pg-anon-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-ivm-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-partman-build /usr/local/pgsql/ /usr/local/pgsql/
COPY pgxn/ pgxn/ COPY pgxn/ pgxn/
RUN make -j $(getconf _NPROCESSORS_ONLN) \ RUN make -j $(getconf _NPROCESSORS_ONLN) \
@@ -856,10 +819,6 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) \
PG_CONFIG=/usr/local/pgsql/bin/pg_config \ PG_CONFIG=/usr/local/pgsql/bin/pg_config \
-C pgxn/neon_utils \ -C pgxn/neon_utils \
-s install && \ -s install && \
make -j $(getconf _NPROCESSORS_ONLN) \
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
-C pgxn/neon_test_utils \
-s install && \
make -j $(getconf _NPROCESSORS_ONLN) \ make -j $(getconf _NPROCESSORS_ONLN) \
PG_CONFIG=/usr/local/pgsql/bin/pg_config \ PG_CONFIG=/usr/local/pgsql/bin/pg_config \
-C pgxn/neon_rmgr \ -C pgxn/neon_rmgr \
@@ -891,17 +850,7 @@ ENV BUILD_TAG=$BUILD_TAG
USER nonroot USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project # Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . . COPY --chown=nonroot . .
RUN cd compute_tools && mold -run cargo build --locked --profile release-line-debug-size-lto RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto
#########################################################################################
#
# Final compute-tools image
#
#########################################################################################
FROM debian:bullseye-slim AS compute-tools-image
COPY --from=compute-tools /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
######################################################################################### #########################################################################################
# #
@@ -952,7 +901,7 @@ COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-deb
# libgeos, libgdal, libsfcgal1, libproj and libprotobuf-c1 for PostGIS # libgeos, libgdal, libsfcgal1, libproj and libprotobuf-c1 for PostGIS
# libxml2, libxslt1.1 for xml2 # libxml2, libxslt1.1 for xml2
# libzstd1 for zstd # libzstd1 for zstd
# libboost* for rdkit # libboost*, libfreetype6, and zlib1g for rdkit
# ca-certificates for communicating with s3 by compute_ctl # ca-certificates for communicating with s3 by compute_ctl
RUN apt update && \ RUN apt update && \
apt install --no-install-recommends -y \ apt install --no-install-recommends -y \
@@ -965,6 +914,7 @@ RUN apt update && \
libboost-serialization1.74.0 \ libboost-serialization1.74.0 \
libboost-system1.74.0 \ libboost-system1.74.0 \
libossp-uuid16 \ libossp-uuid16 \
libfreetype6 \
libgeos-c1v5 \ libgeos-c1v5 \
libgdal28 \ libgdal28 \
libproj19 \ libproj19 \
@@ -976,6 +926,7 @@ RUN apt update && \
libcurl4-openssl-dev \ libcurl4-openssl-dev \
locales \ locales \
procps \ procps \
zlib1g \
ca-certificates && \ ca-certificates && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
Dockerfile.compute-tools (new file)
@@ -0,0 +1,32 @@
# First transient image to build compute_tools binaries
# NB: keep in sync with rust image version in .github/workflows/build_and_test.yml
ARG REPOSITORY=neondatabase
ARG IMAGE=build-tools
ARG TAG=pinned
ARG BUILD_TAG
FROM $REPOSITORY/$IMAGE:$TAG AS rust-build
WORKDIR /home/nonroot
# Enable https://github.com/paritytech/cachepot to cache Rust crates' compilation results in Docker builds.
# Set up cachepot to use an AWS S3 bucket for cache results, to reuse it between `docker build` invocations.
# cachepot falls back to local filesystem if S3 is misconfigured, not failing the build.
ARG RUSTC_WRAPPER=cachepot
ENV AWS_REGION=eu-central-1
ENV CACHEPOT_S3_KEY_PREFIX=cachepot
ARG CACHEPOT_BUCKET=neon-github-dev
#ARG AWS_ACCESS_KEY_ID
#ARG AWS_SECRET_ACCESS_KEY
ARG BUILD_TAG
ENV BUILD_TAG=$BUILD_TAG
COPY . .
RUN set -e \
&& mold -run cargo build -p compute_tools --locked --release \
&& cachepot -s
# Final image that only has one binary
FROM debian:bullseye-slim
COPY --from=rust-build /home/nonroot/target/release/compute_ctl /usr/local/bin/compute_ctl
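A local build of this image might look roughly like the following sketch (image name and `BUILD_TAG` value are illustrative; without S3 credentials cachepot simply falls back to the local filesystem, as noted above):

```bash
docker build \
  -f Dockerfile.compute-tools \
  --build-arg BUILD_TAG=local-dev \
  -t compute-tools:local-dev \
  .
```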
@@ -51,8 +51,6 @@ CARGO_BUILD_FLAGS += $(filter -j1,$(MAKEFLAGS))
CARGO_CMD_PREFIX += $(if $(filter n,$(MAKEFLAGS)),,+) CARGO_CMD_PREFIX += $(if $(filter n,$(MAKEFLAGS)),,+)
# Force cargo not to print progress bar # Force cargo not to print progress bar
CARGO_CMD_PREFIX += CARGO_TERM_PROGRESS_WHEN=never CI=1 CARGO_CMD_PREFIX += CARGO_TERM_PROGRESS_WHEN=never CI=1
# Set PQ_LIB_DIR to make sure `storage_controller` get linked with bundled libpq (through diesel)
CARGO_CMD_PREFIX += PQ_LIB_DIR=$(POSTGRES_INSTALL_DIR)/v16/lib
# #
# Top level Makefile to build Neon and PostgreSQL # Top level Makefile to build Neon and PostgreSQL
@@ -159,8 +157,8 @@ neon-pg-ext-%: postgres-%
-C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \ -C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \
-f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile install -f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile install
.PHONY: neon-pg-clean-ext-% .PHONY: neon-pg-ext-clean-%
neon-pg-clean-ext-%: neon-pg-ext-clean-%:
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \ $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
-C $(POSTGRES_INSTALL_DIR)/build/neon-$* \ -C $(POSTGRES_INSTALL_DIR)/build/neon-$* \
-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean
@@ -176,10 +174,10 @@ neon-pg-clean-ext-%:
# Build walproposer as a static library. walproposer source code is located # Build walproposer as a static library. walproposer source code is located
# in the pgxn/neon directory. # in the pgxn/neon directory.
# #
# We also need to include libpgport.a and libpgcommon.a, because walproposer # We also need to include libpgport.a and libpgcommon.a, because walproposer
# uses some functions from those libraries. # uses some functions from those libraries.
# #
# Some object files are removed from libpgport.a and libpgcommon.a because # Some object files are removed from libpgport.a and libpgcommon.a because
# they depend on openssl and other libraries that are not included in our # they depend on openssl and other libraries that are not included in our
# Rust build. # Rust build.
@@ -216,11 +214,11 @@ neon-pg-ext: \
neon-pg-ext-v15 \ neon-pg-ext-v15 \
neon-pg-ext-v16 neon-pg-ext-v16
.PHONY: neon-pg-clean-ext .PHONY: neon-pg-ext-clean
neon-pg-clean-ext: \ neon-pg-ext-clean: \
neon-pg-clean-ext-v14 \ neon-pg-ext-clean-v14 \
neon-pg-clean-ext-v15 \ neon-pg-ext-clean-v15 \
neon-pg-clean-ext-v16 neon-pg-ext-clean-v16
# shorthand to build all Postgres versions # shorthand to build all Postgres versions
.PHONY: postgres .PHONY: postgres
@@ -249,7 +247,7 @@ postgres-check: \
# This doesn't remove the effects of 'configure'. # This doesn't remove the effects of 'configure'.
.PHONY: clean .PHONY: clean
clean: postgres-clean neon-pg-clean-ext clean: postgres-clean neon-pg-ext-clean
$(CARGO_CMD_PREFIX) cargo clean $(CARGO_CMD_PREFIX) cargo clean
# This removes everything # This removes everything
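Taking the `neon-pg-clean-ext-%` spelling of these targets, cleaning would be invoked along these lines (version suffix illustrative):

```bash
make neon-pg-clean-ext-v16   # clean a single per-version extension build
make clean                   # clean Postgres builds, extensions and cargo artifacts
```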
NOTICE
@@ -1,5 +1,5 @@
Neon Neon
Copyright 2022 - 2024 Neon Inc. Copyright 2022 Neon Inc.
The PostgreSQL submodules in vendor/ are licensed under the PostgreSQL license. The PostgreSQL submodules in vendor/ are licensed under the PostgreSQL license.
See vendor/postgres-vX/COPYRIGHT for details. See vendor/postgres-vX/COPYRIGHT for details.
@@ -5,7 +5,7 @@
Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes. Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes.
## Quick start ## Quick start
Try the [Neon Free Tier](https://neon.tech/github) to create a serverless Postgres instance. Then connect to it with your preferred Postgres client (psql, dbeaver, etc) or use the online [SQL Editor](https://neon.tech/docs/get-started-with-neon/query-with-neon-sql-editor/). See [Connect from any application](https://neon.tech/docs/connect/connect-from-any-app/) for connection instructions. Try the [Neon Free Tier](https://neon.tech/docs/introduction/technical-preview-free-tier/) to create a serverless Postgres instance. Then connect to it with your preferred Postgres client (psql, dbeaver, etc) or use the online [SQL Editor](https://neon.tech/docs/get-started-with-neon/query-with-neon-sql-editor/). See [Connect from any application](https://neon.tech/docs/connect/connect-from-any-app/) for connection instructions.
Alternatively, compile and run the project [locally](#running-local-installation). Alternatively, compile and run the project [locally](#running-local-installation).
@@ -14,8 +14,8 @@ Alternatively, compile and run the project [locally](#running-local-installation
A Neon installation consists of compute nodes and the Neon storage engine. Compute nodes are stateless PostgreSQL nodes backed by the Neon storage engine. A Neon installation consists of compute nodes and the Neon storage engine. Compute nodes are stateless PostgreSQL nodes backed by the Neon storage engine.
The Neon storage engine consists of two major components: The Neon storage engine consists of two major components:
- Pageserver: Scalable storage backend for the compute nodes. - Pageserver. Scalable storage backend for the compute nodes.
- Safekeepers: The safekeepers form a redundant WAL service that received WAL from the compute node, and stores it durably until it has been processed by the pageserver and uploaded to cloud storage. - Safekeepers. The safekeepers form a redundant WAL service that received WAL from the compute node, and stores it durably until it has been processed by the pageserver and uploaded to cloud storage.
See developer documentation in [SUMMARY.md](/docs/SUMMARY.md) for more information. See developer documentation in [SUMMARY.md](/docs/SUMMARY.md) for more information.
@@ -81,9 +81,9 @@ The project uses [rust toolchain file](./rust-toolchain.toml) to define the vers
This file is automatically picked up by [`rustup`](https://rust-lang.github.io/rustup/overrides.html#the-toolchain-file) that installs (if absent) and uses the toolchain version pinned in the file. This file is automatically picked up by [`rustup`](https://rust-lang.github.io/rustup/overrides.html#the-toolchain-file) that installs (if absent) and uses the toolchain version pinned in the file.
rustup users who want to build with another toolchain can use the [`rustup override`](https://rust-lang.github.io/rustup/overrides.html#directory-overrides) command to set a specific toolchain for the project's directory. rustup users who want to build with another toolchain can use [`rustup override`](https://rust-lang.github.io/rustup/overrides.html#directory-overrides) command to set a specific toolchain for the project's directory.
non-rustup users most probably are not getting the same toolchain automatically from the file, so they are responsible for manually verifying that their toolchain matches the version in the file. non-rustup users most probably are not getting the same toolchain automatically from the file, so they are responsible for manually verifying their toolchain matches the version in the file.
Newer rustc versions most probably will work fine, yet older ones might not be supported due to some new features used by the project or the crates. Newer rustc versions most probably will work fine, yet older ones might not be supported due to some new features used by the project or the crates.
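For example, a rustup user who wants to experiment with a different toolchain in this checkout only could use the `rustup override` command mentioned above (the toolchain name below is just an illustration):

```sh
# Pin a specific toolchain for this project directory only.
rustup override set 1.76.0

# Drop the override again and fall back to rust-toolchain.toml.
rustup override unset
```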
#### Building on Linux #### Building on Linux
@@ -124,7 +124,7 @@ make -j`sysctl -n hw.logicalcpu` -s
To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively. To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively.
To run the integration tests or Python scripts (not required to use the code), install To run the integration tests or Python scripts (not required to use the code), install
Python (3.9 or higher), and install the python3 packages using `./scripts/pysync` (requires [poetry>=1.3](https://python-poetry.org/)) in the project directory. Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry>=1.3](https://python-poetry.org/)) in the project directory.
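As a rough sketch, assuming the default `pg_install` layout in the repository root, the environment setup could look like this:

```sh
# Make the locally built client binaries and libraries visible to your shell.
export PATH="$PWD/pg_install/bin:$PATH"
export LD_LIBRARY_PATH="$PWD/pg_install/lib:$LD_LIBRARY_PATH"

# Install the Python dependencies for tests and scripts (requires poetry >= 1.3).
./scripts/pysync
```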
#### Running neon database #### Running neon database
@@ -166,7 +166,7 @@ Starting postgres at 'postgresql://cloud_admin@127.0.0.1:55432/postgres'
2. Now, it is possible to connect to postgres and run some queries: 2. Now, it is possible to connect to postgres and run some queries:
```text ```text
> psql -p 55432 -h 127.0.0.1 -U cloud_admin postgres > psql -p55432 -h 127.0.0.1 -U cloud_admin postgres
postgres=# CREATE TABLE t(key int primary key, value text); postgres=# CREATE TABLE t(key int primary key, value text);
CREATE TABLE CREATE TABLE
postgres=# insert into t values(1,1); postgres=# insert into t values(1,1);
@@ -205,7 +205,7 @@ Starting postgres at 'postgresql://cloud_admin@127.0.0.1:55434/postgres'
# this new postgres instance will have all the data from 'main' postgres, # this new postgres instance will have all the data from 'main' postgres,
# but all modifications would not affect data in original postgres # but all modifications would not affect data in original postgres
> psql -p 55434 -h 127.0.0.1 -U cloud_admin postgres > psql -p55434 -h 127.0.0.1 -U cloud_admin postgres
postgres=# select * from t; postgres=# select * from t;
key | value key | value
-----+------- -----+-------
@@ -216,7 +216,7 @@ postgres=# insert into t values(2,2);
INSERT 0 1 INSERT 0 1
# check that the new change doesn't affect the 'main' postgres # check that the new change doesn't affect the 'main' postgres
> psql -p 55432 -h 127.0.0.1 -U cloud_admin postgres > psql -p55432 -h 127.0.0.1 -U cloud_admin postgres
postgres=# select * from t; postgres=# select * from t;
key | value key | value
-----+------- -----+-------
@@ -224,28 +224,14 @@ postgres=# select * from t;
(1 row) (1 row)
``` ```
4. If you want to run tests afterwards (see below), you must stop all the running pageserver, safekeeper, and postgres instances 4. If you want to run tests afterward (see below), you must stop all the running of the pageserver, safekeeper, and postgres instances
you have just started. You can terminate them all with one command: you have just started. You can terminate them all with one command:
```sh ```sh
> cargo neon stop > cargo neon stop
``` ```
More advanced usages can be found at [Control Plane and Neon Local](./control_plane/README.md).
#### Handling build failures
If you encounter errors during setting up the initial tenant, it's best to stop everything (`cargo neon stop`) and remove the `.neon` directory. Then fix the problems, and start the setup again.
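In other words, a full reset looks roughly like this (note that it deletes all local data):

```sh
cargo neon stop   # stop the pageserver, safekeeper, and postgres instances
rm -rf .neon      # wipe the local state directory, including your database
```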
## Running tests ## Running tests
### Rust unit tests
We are using [`cargo-nextest`](https://nexte.st/) to run the tests in GitHub Workflows.
Some crates no longer support running plain `cargo test`; prefer `cargo nextest run` instead.
You can install `cargo-nextest` with `cargo install cargo-nextest`.
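For example:

```sh
# Install the runner once, then use it in place of `cargo test`.
cargo install cargo-nextest
cargo nextest run
```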
### Integration tests
Ensure your dependencies are installed as described [here](https://github.com/neondatabase/neon#dependency-installation-notes). Ensure your dependencies are installed as described [here](https://github.com/neondatabase/neon#dependency-installation-notes).
```sh ```sh
@@ -257,28 +243,12 @@ CARGO_BUILD_FLAGS="--features=testing" make
``` ```
By default, this runs both debug and release modes, and all supported postgres versions. When By default, this runs both debug and release modes, and all supported postgres versions. When
testing locally, it is convenient to run just one set of permutations, like this: testing locally, it is convenient to run just run one set of permutations, like this:
```sh ```sh
DEFAULT_PG_VERSION=15 BUILD_TYPE=release ./scripts/pytest DEFAULT_PG_VERSION=15 BUILD_TYPE=release ./scripts/pytest
``` ```
## Flamegraphs
You may find yourself in need of flamegraphs for software in this repository.
You can use [`flamegraph-rs`](https://github.com/flamegraph-rs/flamegraph) or the original [`flamegraph.pl`](https://github.com/brendangregg/FlameGraph). Your choice!
>[!IMPORTANT]
> If you're using `lld` or `mold`, you need the `--no-rosegment` linker argument.
> It's a [general thing with Rust / lld / mold](https://crbug.com/919499#c16), not specific to this repository.
> See [this PR for further instructions](https://github.com/neondatabase/neon/pull/6764).
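As an illustration only (the exact flags depend on your linker setup; see the PR linked above for the authoritative instructions), the linker argument can be passed through `RUSTFLAGS` when profiling with `cargo flamegraph`:

```sh
# Hypothetical invocation: forward --no-rosegment to lld/mold while profiling the pageserver binary.
RUSTFLAGS="-C link-arg=-Wl,--no-rosegment" cargo flamegraph --bin pageserver
```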
## Cleanup
For cleaning up the source tree from build artifacts, run `make clean` in the source directory.
For removing every artifact from build and configure steps, run `make distclean`, and also consider removing the cargo binaries in the `target` directory, as well as the database in the `.neon` directory. Note that removing the `.neon` directory will remove your database, with all data in it. You have been warned!
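Roughly, the cleanup options mentioned above correspond to:

```sh
make clean       # remove build artifacts from the source tree
make distclean   # also remove everything produced by the configure step
cargo clean      # remove the Rust build outputs under target/
rm -rf .neon     # removes your local database and all data in it!
```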
## Documentation ## Documentation
[docs](/docs) Contains a top-level overview of all available markdown documentation. [docs](/docs) Contains a top-level overview of all available markdown documentation.
@@ -2,13 +2,4 @@ disallowed-methods = [
"tokio::task::block_in_place", "tokio::task::block_in_place",
# Allow this for now, to deny it later once we stop using Handle::block_on completely # Allow this for now, to deny it later once we stop using Handle::block_on completely
# "tokio::runtime::Handle::block_on", # "tokio::runtime::Handle::block_on",
# use tokio_epoll_uring_ext instead
"tokio_epoll_uring::thread_local_system",
]
disallowed-macros = [
# use std::pin::pin
"futures::pin_mut",
# cannot disallow this, because clippy finds used from tokio macros
#"tokio::pin",
] ]
@@ -32,29 +32,6 @@ compute_ctl -D /var/db/postgres/compute \
-b /usr/local/bin/postgres -b /usr/local/bin/postgres
``` ```
## State Diagram
Computes can be in various states. Below is a diagram that details how a
compute moves between states.
```mermaid
%% https://mermaid.js.org/syntax/stateDiagram.html
stateDiagram-v2
[*] --> Empty : Compute spawned
Empty --> ConfigurationPending : Waiting for compute spec
ConfigurationPending --> Configuration : Received compute spec
Configuration --> Failed : Failed to configure the compute
Configuration --> Running : Compute has been configured
Empty --> Init : Compute spec is immediately available
Empty --> TerminationPending : Requested termination
Init --> Failed : Failed to start Postgres
Init --> Running : Started Postgres
Running --> TerminationPending : Requested termination
TerminationPending --> Terminated : Terminated compute
Failed --> [*] : Compute exited
Terminated --> [*] : Compute exited
```
## Tests ## Tests
Cargo formatter: Cargo formatter:
@@ -45,6 +45,7 @@ use std::{thread, time::Duration};
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use chrono::Utc; use chrono::Utc;
use clap::Arg; use clap::Arg;
use nix::sys::signal::{kill, Signal};
use signal_hook::consts::{SIGQUIT, SIGTERM}; use signal_hook::consts::{SIGQUIT, SIGTERM};
use signal_hook::{consts::SIGINT, iterator::Signals}; use signal_hook::{consts::SIGINT, iterator::Signals};
use tracing::{error, info}; use tracing::{error, info};
@@ -52,9 +53,7 @@ use url::Url;
use compute_api::responses::ComputeStatus; use compute_api::responses::ComputeStatus;
use compute_tools::compute::{ use compute_tools::compute::{ComputeNode, ComputeState, ParsedSpec, PG_PID, SYNC_SAFEKEEPERS_PID};
forward_termination_signal, ComputeNode, ComputeState, ParsedSpec, PG_PID,
};
use compute_tools::configurator::launch_configurator; use compute_tools::configurator::launch_configurator;
use compute_tools::extension_server::get_pg_version; use compute_tools::extension_server::get_pg_version;
use compute_tools::http::api::launch_http_server; use compute_tools::http::api::launch_http_server;
@@ -395,15 +394,6 @@ fn main() -> Result<()> {
info!("synced safekeepers at lsn {lsn}"); info!("synced safekeepers at lsn {lsn}");
} }
let mut state = compute.state.lock().unwrap();
if state.status == ComputeStatus::TerminationPending {
state.status = ComputeStatus::Terminated;
compute.state_changed.notify_all();
// we were asked to terminate gracefully, don't exit to avoid restart
delay_exit = true
}
drop(state);
if let Err(err) = compute.check_for_core_dumps() { if let Err(err) = compute.check_for_core_dumps() {
error!("error while checking for core dumps: {err:?}"); error!("error while checking for core dumps: {err:?}");
} }
@@ -533,7 +523,16 @@ fn cli() -> clap::Command {
/// wait for termination which would be easy then. /// wait for termination which would be easy then.
fn handle_exit_signal(sig: i32) { fn handle_exit_signal(sig: i32) {
info!("received {sig} termination signal"); info!("received {sig} termination signal");
forward_termination_signal(); let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
if ss_pid != 0 {
let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
kill(ss_pid, Signal::SIGTERM).ok();
}
let pg_pid = PG_PID.load(Ordering::SeqCst);
if pg_pid != 0 {
let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
kill(pg_pid, Signal::SIGTERM).ok();
}
exit(1); exit(1);
} }
@@ -2,7 +2,7 @@ use std::collections::HashMap;
use std::env; use std::env;
use std::fs; use std::fs;
use std::io::BufRead; use std::io::BufRead;
use std::os::unix::fs::{symlink, PermissionsExt}; use std::os::unix::fs::PermissionsExt;
use std::path::Path; use std::path::Path;
use std::process::{Command, Stdio}; use std::process::{Command, Stdio};
use std::str::FromStr; use std::str::FromStr;
@@ -17,9 +17,9 @@ use chrono::{DateTime, Utc};
use futures::future::join_all; use futures::future::join_all;
use futures::stream::FuturesUnordered; use futures::stream::FuturesUnordered;
use futures::StreamExt; use futures::StreamExt;
use nix::unistd::Pid;
use postgres::error::SqlState;
use postgres::{Client, NoTls}; use postgres::{Client, NoTls};
use tokio;
use tokio_postgres;
use tracing::{debug, error, info, instrument, warn}; use tracing::{debug, error, info, instrument, warn};
use utils::id::{TenantId, TimelineId}; use utils::id::{TenantId, TimelineId};
use utils::lsn::Lsn; use utils::lsn::Lsn;
@@ -28,8 +28,6 @@ use compute_api::responses::{ComputeMetrics, ComputeStatus};
use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec}; use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec};
use utils::measured_stream::MeasuredReader; use utils::measured_stream::MeasuredReader;
use nix::sys::signal::{kill, Signal};
use remote_storage::{DownloadError, RemotePath}; use remote_storage::{DownloadError, RemotePath};
use crate::checker::create_availability_check_data; use crate::checker::create_availability_check_data;
@@ -209,7 +207,6 @@ fn maybe_cgexec(cmd: &str) -> Command {
/// Create special neon_superuser role, that's a slightly nerfed version of a real superuser /// Create special neon_superuser role, that's a slightly nerfed version of a real superuser
/// that we give to customers /// that we give to customers
#[instrument(skip_all)]
fn create_neon_superuser(spec: &ComputeSpec, client: &mut Client) -> Result<()> { fn create_neon_superuser(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
let roles = spec let roles = spec
.cluster .cluster
@@ -322,12 +319,11 @@ impl ComputeNode {
// Get basebackup from the libpq connection to pageserver using `connstr` and // Get basebackup from the libpq connection to pageserver using `connstr` and
// unarchive it to `pgdata` directory overriding all its previous content. // unarchive it to `pgdata` directory overriding all its previous content.
#[instrument(skip_all, fields(%lsn))] #[instrument(skip_all, fields(%lsn))]
fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> { fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
let spec = compute_state.pspec.as_ref().expect("spec must be set"); let spec = compute_state.pspec.as_ref().expect("spec must be set");
let start_time = Instant::now(); let start_time = Instant::now();
let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap(); let mut config = postgres::Config::from_str(&spec.pageserver_connstr)?;
let mut config = postgres::Config::from_str(shard0_connstr)?;
// Use the storage auth token from the config file, if given. // Use the storage auth token from the config file, if given.
// Note: this overrides any password set in the connection string. // Note: this overrides any password set in the connection string.
@@ -394,34 +390,6 @@ impl ComputeNode {
Ok(()) Ok(())
} }
// Gets the basebackup in a retry loop
#[instrument(skip_all, fields(%lsn))]
pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
let mut retry_period_ms = 500.0;
let mut attempts = 0;
let max_attempts = 10;
loop {
let result = self.try_get_basebackup(compute_state, lsn);
match result {
Ok(_) => {
return result;
}
Err(ref e) if attempts < max_attempts => {
warn!(
"Failed to get basebackup: {} (attempt {}/{})",
e, attempts, max_attempts
);
std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
retry_period_ms *= 1.5;
}
Err(_) => {
return result;
}
}
attempts += 1;
}
}
pub async fn check_safekeepers_synced_async( pub async fn check_safekeepers_synced_async(
&self, &self,
compute_state: &ComputeState, compute_state: &ComputeState,
@@ -637,48 +605,6 @@ impl ComputeNode {
// Update pg_hba.conf received with basebackup. // Update pg_hba.conf received with basebackup.
update_pg_hba(pgdata_path)?; update_pg_hba(pgdata_path)?;
// Place pg_dynshmem under /dev/shm. This allows us to use
// 'dynamic_shared_memory_type = mmap' so that the files are placed in
// /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
//
// Why on earth don't we just stick to the 'posix' default, you might
// ask. It turns out that making large allocations with 'posix' doesn't
// work very well with autoscaling. The behavior we want is that:
//
// 1. You can make large DSM allocations, larger than the current RAM
// size of the VM, without errors
//
// 2. If the allocated memory is really used, the VM is scaled up
// automatically to accommodate that
//
// We try to make that possible by having swap in the VM. But with the
// default 'posix' DSM implementation, we fail step 1, even when there's
// plenty of swap available. PostgreSQL uses posix_fallocate() to create
// the shmem segment, which is really just a file in /dev/shm in Linux,
// but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
// than available RAM.
//
// Using 'dynamic_shared_memory_type = mmap' works around that, because
// the Postgres 'mmap' DSM implementation doesn't use
// posix_fallocate(). Instead, it uses repeated calls to write(2) to
// fill the file with zeros. It's weird that that differs between
// 'posix' and 'mmap', but we take advantage of it. When the file is
// filled slowly with write(2), the kernel allows it to grow larger, as
// long as there's swap available.
//
// In short, using 'dynamic_shared_memory_type = mmap' allows a single DSM
// segment to be larger than the currently available RAM. But we don't
// want to back it with a real file, which the kernel would try to
// flush to disk, so we symlink pg_dynshmem to /dev/shm instead.
//
// We don't set 'dynamic_shared_memory_type = mmap' here, we let the
// control plane control that option. If 'mmap' is not used, this
// symlink doesn't affect anything.
//
// See https://github.com/neondatabase/autoscaling/issues/800
std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
match spec.mode { match spec.mode {
ComputeMode::Primary => {} ComputeMode::Primary => {}
ComputeMode::Replica | ComputeMode::Static(..) => { ComputeMode::Replica | ComputeMode::Static(..) => {
@@ -723,12 +649,8 @@ impl ComputeNode {
// Stop it when it's ready // Stop it when it's ready
info!("waiting for postgres"); info!("waiting for postgres");
wait_for_postgres(&mut pg, Path::new(pgdata))?; wait_for_postgres(&mut pg, Path::new(pgdata))?;
// SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL pg.kill()?;
// it to avoid orphaned processes prowling around while datadir is info!("sent kill signal");
// wiped.
let pm_pid = Pid::from_raw(pg.id() as i32);
kill(pm_pid, Signal::SIGQUIT)?;
info!("sent SIGQUIT signal");
pg.wait()?; pg.wait()?;
info!("done prewarming"); info!("done prewarming");
@@ -769,26 +691,6 @@ impl ComputeNode {
Ok((pg, logs_handle)) Ok((pg, logs_handle))
} }
/// Do post configuration of the already started Postgres. This function spawns a background thread to
/// configure the database after applying the compute spec. Currently, it upgrades the neon extension
/// version. In the future, it may upgrade all 3rd-party extensions.
#[instrument(skip_all)]
pub fn post_apply_config(&self) -> Result<()> {
let connstr = self.connstr.clone();
thread::spawn(move || {
let func = || {
let mut client = Client::connect(connstr.as_str(), NoTls)?;
handle_neon_extension_upgrade(&mut client)
.context("handle_neon_extension_upgrade")?;
Ok::<_, anyhow::Error>(())
};
if let Err(err) = func() {
error!("error while post_apply_config: {err:#}");
}
});
Ok(())
}
/// Do initial configuration of the already started Postgres. /// Do initial configuration of the already started Postgres.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> { pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
@@ -800,34 +702,27 @@ impl ComputeNode {
// but we can create a new one and grant it all privileges. // but we can create a new one and grant it all privileges.
let connstr = self.connstr.clone(); let connstr = self.connstr.clone();
let mut client = match Client::connect(connstr.as_str(), NoTls) { let mut client = match Client::connect(connstr.as_str(), NoTls) {
Err(e) => match e.code() { Err(e) => {
Some(&SqlState::INVALID_PASSWORD) info!(
| Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => { "cannot connect to postgres: {}, retrying with `zenith_admin` username",
// connect with zenith_admin if cloud_admin could not authenticate e
info!( );
"cannot connect to postgres: {}, retrying with `zenith_admin` username", let mut zenith_admin_connstr = connstr.clone();
e
);
let mut zenith_admin_connstr = connstr.clone();
zenith_admin_connstr zenith_admin_connstr
.set_username("zenith_admin") .set_username("zenith_admin")
.map_err(|_| anyhow::anyhow!("invalid connstr"))?; .map_err(|_| anyhow::anyhow!("invalid connstr"))?;
let mut client = let mut client = Client::connect(zenith_admin_connstr.as_str(), NoTls)?;
Client::connect(zenith_admin_connstr.as_str(), NoTls) // Disable forwarding so that users don't get a cloud_admin role
.context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?; client.simple_query("SET neon.forward_ddl = false")?;
// Disable forwarding so that users don't get a cloud_admin role client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
client.simple_query("SET neon.forward_ddl = false")?; client.simple_query("GRANT zenith_admin TO cloud_admin")?;
client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?; drop(client);
client.simple_query("GRANT zenith_admin TO cloud_admin")?;
drop(client);
// reconnect with connstring with expected name // reconnect with connstring with expected name
Client::connect(connstr.as_str(), NoTls)? Client::connect(connstr.as_str(), NoTls)?
} }
_ => return Err(e.into()),
},
Ok(client) => client, Ok(client) => client,
}; };
@@ -841,12 +736,7 @@ impl ComputeNode {
handle_roles(spec, &mut client)?; handle_roles(spec, &mut client)?;
handle_databases(spec, &mut client)?; handle_databases(spec, &mut client)?;
handle_role_deletions(spec, connstr.as_str(), &mut client)?; handle_role_deletions(spec, connstr.as_str(), &mut client)?;
handle_grants( handle_grants(spec, &mut client, connstr.as_str())?;
spec,
&mut client,
connstr.as_str(),
self.has_feature(ComputeFeature::AnonExtension),
)?;
handle_extensions(spec, &mut client)?; handle_extensions(spec, &mut client)?;
handle_extension_neon(&mut client)?; handle_extension_neon(&mut client)?;
create_availability_check_data(&mut client)?; create_availability_check_data(&mut client)?;
@@ -854,11 +744,12 @@ impl ComputeNode {
// 'Close' connection // 'Close' connection
drop(client); drop(client);
// Run migrations separately to not hold up cold starts if self.has_feature(ComputeFeature::Migrations) {
thread::spawn(move || { thread::spawn(move || {
let mut client = Client::connect(connstr.as_str(), NoTls)?; let mut client = Client::connect(connstr.as_str(), NoTls)?;
handle_migrations(&mut client) handle_migrations(&mut client)
}); });
}
Ok(()) Ok(())
} }
@@ -920,12 +811,7 @@ impl ComputeNode {
handle_roles(&spec, &mut client)?; handle_roles(&spec, &mut client)?;
handle_databases(&spec, &mut client)?; handle_databases(&spec, &mut client)?;
handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?; handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?;
handle_grants( handle_grants(&spec, &mut client, self.connstr.as_str())?;
&spec,
&mut client,
self.connstr.as_str(),
self.has_feature(ComputeFeature::AnonExtension),
)?;
handle_extensions(&spec, &mut client)?; handle_extensions(&spec, &mut client)?;
handle_extension_neon(&mut client)?; handle_extension_neon(&mut client)?;
// We can skip handle_migrations here because a new migration can only appear // We can skip handle_migrations here because a new migration can only appear
@@ -1023,21 +909,18 @@ impl ComputeNode {
let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?; let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
let config_time = Utc::now(); let config_time = Utc::now();
if pspec.spec.mode == ComputeMode::Primary { if pspec.spec.mode == ComputeMode::Primary && !pspec.spec.skip_pg_catalog_updates {
if !pspec.spec.skip_pg_catalog_updates { let pgdata_path = Path::new(&self.pgdata);
let pgdata_path = Path::new(&self.pgdata); // temporarily reset max_cluster_size in config
// temporarily reset max_cluster_size in config // to avoid the possibility of hitting the limit, while we are applying config:
// to avoid the possibility of hitting the limit, while we are applying config: // creating new extensions, roles, etc...
// creating new extensions, roles, etc... config::compute_ctl_temp_override_create(pgdata_path, "neon.max_cluster_size=-1")?;
config::compute_ctl_temp_override_create(pgdata_path, "neon.max_cluster_size=-1")?; self.pg_reload_conf()?;
self.pg_reload_conf()?;
self.apply_config(&compute_state)?; self.apply_config(&compute_state)?;
config::compute_ctl_temp_override_remove(pgdata_path)?; config::compute_ctl_temp_override_remove(pgdata_path)?;
self.pg_reload_conf()?; self.pg_reload_conf()?;
}
self.post_apply_config()?;
} }
let startup_end_time = Utc::now(); let startup_end_time = Utc::now();
@@ -1358,17 +1241,3 @@ LIMIT 100",
Ok(remote_ext_metrics) Ok(remote_ext_metrics)
} }
} }
pub fn forward_termination_signal() {
let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
if ss_pid != 0 {
let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
kill(ss_pid, Signal::SIGTERM).ok();
}
let pg_pid = PG_PID.load(Ordering::SeqCst);
if pg_pid != 0 {
let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
// use 'immediate' shutdown (SIGQUIT): https://www.postgresql.org/docs/current/server-shutdown.html
kill(pg_pid, Signal::SIGQUIT).ok();
}
}
@@ -17,7 +17,6 @@ pub fn line_in_file(path: &Path, line: &str) -> Result<bool> {
.write(true) .write(true)
.create(true) .create(true)
.append(false) .append(false)
.truncate(false)
.open(path)?; .open(path)?;
let buf = io::BufReader::new(&file); let buf = io::BufReader::new(&file);
let mut count: usize = 0; let mut count: usize = 0;
@@ -52,9 +51,6 @@ pub fn write_postgres_conf(
if let Some(s) = &spec.pageserver_connstring { if let Some(s) = &spec.pageserver_connstring {
writeln!(file, "neon.pageserver_connstring={}", escape_conf_value(s))?; writeln!(file, "neon.pageserver_connstring={}", escape_conf_value(s))?;
} }
if let Some(stripe_size) = spec.shard_stripe_size {
writeln!(file, "neon.stripe_size={stripe_size}")?;
}
if !spec.safekeeper_connstrings.is_empty() { if !spec.safekeeper_connstrings.is_empty() {
writeln!( writeln!(
file, file,
@@ -83,12 +79,6 @@ pub fn write_postgres_conf(
ComputeMode::Replica => { ComputeMode::Replica => {
// hot_standby is 'on' by default, but let's be explicit // hot_standby is 'on' by default, but let's be explicit
writeln!(file, "hot_standby=on")?; writeln!(file, "hot_standby=on")?;
// Inform the replica about the primary state
// Default is 'false'
if let Some(primary_is_running) = spec.primary_is_running {
writeln!(file, "neon.primary_is_running={}", primary_is_running)?;
}
} }
} }
@@ -71,7 +71,7 @@ More specifically, here is an example ext_index.json
} }
} }
*/ */
use anyhow::Result; use anyhow::{self, Result};
use anyhow::{bail, Context}; use anyhow::{bail, Context};
use bytes::Bytes; use bytes::Bytes;
use compute_api::spec::RemoteExtSpec; use compute_api::spec::RemoteExtSpec;
@@ -5,7 +5,6 @@ use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
use std::thread; use std::thread;
use crate::compute::forward_termination_signal;
use crate::compute::{ComputeNode, ComputeState, ParsedSpec}; use crate::compute::{ComputeNode, ComputeState, ParsedSpec};
use compute_api::requests::ConfigurationRequest; use compute_api::requests::ConfigurationRequest;
use compute_api::responses::{ComputeStatus, ComputeStatusResponse, GenericAPIError}; use compute_api::responses::{ComputeStatus, ComputeStatusResponse, GenericAPIError};
@@ -13,6 +12,8 @@ use compute_api::responses::{ComputeStatus, ComputeStatusResponse, GenericAPIErr
use anyhow::Result; use anyhow::Result;
use hyper::service::{make_service_fn, service_fn}; use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Server, StatusCode}; use hyper::{Body, Method, Request, Response, Server, StatusCode};
use num_cpus;
use serde_json;
use tokio::task; use tokio::task;
use tracing::{error, info, warn}; use tracing::{error, info, warn};
use tracing_utils::http::OtelName; use tracing_utils::http::OtelName;
@@ -122,17 +123,6 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
} }
} }
(&Method::POST, "/terminate") => {
info!("serving /terminate POST request");
match handle_terminate_request(compute).await {
Ok(()) => Response::new(Body::empty()),
Err((msg, code)) => {
error!("error handling /terminate request: {msg}");
render_json_error(&msg, code)
}
}
}
// download extension files from remote extension storage on demand // download extension files from remote extension storage on demand
(&Method::POST, route) if route.starts_with("/extension_server/") => { (&Method::POST, route) if route.starts_with("/extension_server/") => {
info!("serving {:?} POST request", route); info!("serving {:?} POST request", route);
@@ -307,49 +297,6 @@ fn render_json_error(e: &str, status: StatusCode) -> Response<Body> {
.unwrap() .unwrap()
} }
async fn handle_terminate_request(compute: &Arc<ComputeNode>) -> Result<(), (String, StatusCode)> {
{
let mut state = compute.state.lock().unwrap();
if state.status == ComputeStatus::Terminated {
return Ok(());
}
if state.status != ComputeStatus::Empty && state.status != ComputeStatus::Running {
let msg = format!(
"invalid compute status for termination request: {:?}",
state.status.clone()
);
return Err((msg, StatusCode::PRECONDITION_FAILED));
}
state.status = ComputeStatus::TerminationPending;
compute.state_changed.notify_all();
drop(state);
}
forward_termination_signal();
info!("sent signal and notified waiters");
// Spawn a blocking thread to wait for compute to become Terminated.
// This is needed so that we do not block the main pool of workers and
// can still serve other requests while this particular request
// is waiting for the compute to reach the Terminated state.
let c = compute.clone();
task::spawn_blocking(move || {
let mut state = c.state.lock().unwrap();
while state.status != ComputeStatus::Terminated {
state = c.state_changed.wait(state).unwrap();
info!(
"waiting for compute to become Terminated, current status: {:?}",
state.status
);
}
Ok(())
})
.await
.unwrap()?;
info!("terminated Postgres");
Ok(())
}
// Main Hyper HTTP server function that runs it and blocks waiting on it forever. // Main Hyper HTTP server function that runs it and blocks waiting on it forever.
#[tokio::main] #[tokio::main]
async fn serve(port: u16, state: Arc<ComputeNode>) { async fn serve(port: u16, state: Arc<ComputeNode>) {
@@ -168,29 +168,6 @@ paths:
schema: schema:
$ref: "#/components/schemas/GenericError" $ref: "#/components/schemas/GenericError"
/terminate:
post:
tags:
- Terminate
summary: Terminate Postgres and wait for it to exit
description: ""
operationId: terminate
responses:
200:
description: Result
412:
description: "wrong state"
content:
application/json:
schema:
$ref: "#/components/schemas/GenericError"
500:
description: "Unexpected error"
content:
application/json:
schema:
$ref: "#/components/schemas/GenericError"
components: components:
securitySchemes: securitySchemes:
JWT: JWT:
@@ -138,34 +138,6 @@ fn watch_compute_activity(compute: &ComputeNode) {
} }
} }
// //
// Don't suspend compute if there is an active logical replication subscription
//
// `where pid is not null` to filter out read-only computes and subscriptions on branches
//
let logical_subscriptions_query =
"select count(*) from pg_stat_subscription where pid is not null;";
match cli.query_one(logical_subscriptions_query, &[]) {
Ok(row) => match row.try_get::<&str, i64>("count") {
Ok(num_subscribers) => {
if num_subscribers > 0 {
compute.update_last_active(Some(Utc::now()));
continue;
}
}
Err(e) => {
warn!("failed to parse `pg_stat_subscription` count: {:?}", e);
continue;
}
},
Err(e) => {
warn!(
"failed to get list of active logical replication subscriptions: {:?}",
e
);
continue;
}
}
//
// Do not suspend compute if autovacuum is running // Do not suspend compute if autovacuum is running
// //
let autovacuum_count_query = "select count(*) from pg_stat_activity where backend_type = 'autovacuum worker'"; let autovacuum_count_query = "select count(*) from pg_stat_activity where backend_type = 'autovacuum worker'";
@@ -264,10 +264,9 @@ pub fn wait_for_postgres(pg: &mut Child, pgdata: &Path) -> Result<()> {
// case we miss some events for some reason. Not strictly necessary, but // case we miss some events for some reason. Not strictly necessary, but
// better safe than sorry. // better safe than sorry.
let (tx, rx) = std::sync::mpsc::channel(); let (tx, rx) = std::sync::mpsc::channel();
let watcher_res = notify::recommended_watcher(move |res| { let (mut watcher, rx): (Box<dyn Watcher>, _) = match notify::recommended_watcher(move |res| {
let _ = tx.send(res); let _ = tx.send(res);
}); }) {
let (mut watcher, rx): (Box<dyn Watcher>, _) = match watcher_res {
Ok(watcher) => (Box::new(watcher), rx), Ok(watcher) => (Box::new(watcher), rx),
Err(e) => { Err(e) => {
match e.kind { match e.kind {
@@ -302,9 +302,9 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
RoleAction::Create => { RoleAction::Create => {
// This branch only runs when roles are created through the console, so it is // This branch only runs when roles are created through the console, so it is
// safe to add more permissions here. BYPASSRLS and REPLICATION are inherited // safe to add more permissions here. BYPASSRLS and REPLICATION are inherited
// from neon_superuser. (NOTE: REPLICATION has been removed from here for now). // from neon_superuser.
let mut query: String = format!( let mut query: String = format!(
"CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS IN ROLE neon_superuser", "CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS REPLICATION IN ROLE neon_superuser",
name.pg_quote() name.pg_quote()
); );
info!("running role create query: '{}'", &query); info!("running role create query: '{}'", &query);
@@ -581,12 +581,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
/// Grant CREATE ON DATABASE to the database owner and do some other alters and grants /// Grant CREATE ON DATABASE to the database owner and do some other alters and grants
/// to allow users creating trusted extensions and re-creating `public` schema, for example. /// to allow users creating trusted extensions and re-creating `public` schema, for example.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn handle_grants( pub fn handle_grants(spec: &ComputeSpec, client: &mut Client, connstr: &str) -> Result<()> {
spec: &ComputeSpec,
client: &mut Client,
connstr: &str,
enable_anon_extension: bool,
) -> Result<()> {
info!("modifying database permissions"); info!("modifying database permissions");
let existing_dbs = get_existing_dbs(client)?; let existing_dbs = get_existing_dbs(client)?;
@@ -655,9 +650,6 @@ pub fn handle_grants(
// remove this code if possible. The worst thing that could happen is that // remove this code if possible. The worst thing that could happen is that
// user won't be able to use public schema in NEW databases created in the // user won't be able to use public schema in NEW databases created in the
// very OLD project. // very OLD project.
//
// Also, alter default permissions so that relations created by extensions can be
// used by neon_superuser without permission issues.
let grant_query = "DO $$\n\ let grant_query = "DO $$\n\
BEGIN\n\ BEGIN\n\
IF EXISTS(\n\ IF EXISTS(\n\
@@ -676,15 +668,6 @@ pub fn handle_grants(
GRANT CREATE ON SCHEMA public TO web_access;\n\ GRANT CREATE ON SCHEMA public TO web_access;\n\
END IF;\n\ END IF;\n\
END IF;\n\ END IF;\n\
IF EXISTS(\n\
SELECT nspname\n\
FROM pg_catalog.pg_namespace\n\
WHERE nspname = 'public'\n\
)\n\
THEN\n\
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO neon_superuser WITH GRANT OPTION;\n\
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO neon_superuser WITH GRANT OPTION;\n\
END IF;\n\
END\n\ END\n\
$$;" $$;"
.to_string(); .to_string();
@@ -695,11 +678,6 @@ pub fn handle_grants(
inlinify(&grant_query) inlinify(&grant_query)
); );
db_client.simple_query(&grant_query)?; db_client.simple_query(&grant_query)?;
// it is important to run this after all grants
if enable_anon_extension {
handle_extension_anon(spec, &db.owner, &mut db_client, false)?;
}
} }
Ok(()) Ok(())
@@ -743,21 +721,9 @@ pub fn handle_extension_neon(client: &mut Client) -> Result<()> {
// which may happen in two cases: // which may happen in two cases:
// - extension was just installed // - extension was just installed
// - extension was already installed and is up to date // - extension was already installed and is up to date
// DISABLED due to compute node unpinning epic let query = "ALTER EXTENSION neon UPDATE";
// let query = "ALTER EXTENSION neon UPDATE"; info!("update neon extension schema with query: {}", query);
// info!("update neon extension version with query: {}", query); client.simple_query(query)?;
// client.simple_query(query)?;
Ok(())
}
#[instrument(skip_all)]
pub fn handle_neon_extension_upgrade(_client: &mut Client) -> Result<()> {
info!("handle neon extension upgrade (not really)");
// DISABLED due to compute node unpinning epic
// let query = "ALTER EXTENSION neon UPDATE";
// info!("update neon extension version with query: {}", query);
// client.simple_query(query)?;
Ok(()) Ok(())
} }
@@ -792,33 +758,6 @@ BEGIN
END LOOP; END LOOP;
END $$; END $$;
"#, "#,
r#"
DO $$
BEGIN
IF (SELECT setting::numeric >= 160000 FROM pg_settings WHERE name = 'server_version_num') THEN
EXECUTE 'GRANT pg_create_subscription TO neon_superuser';
END IF;
END
$$;"#,
"GRANT pg_monitor TO neon_superuser WITH ADMIN OPTION",
// Don't remove: these are SQL statements that we originally applied in migrations but that turned out to be executed somewhere else.
"",
"",
"",
"",
// Add new migrations below.
r#"
DO $$
DECLARE
role_name TEXT;
BEGIN
FOR role_name IN SELECT rolname FROM pg_roles WHERE rolreplication IS TRUE
LOOP
RAISE NOTICE 'EXECUTING ALTER ROLE % NOREPLICATION', quote_ident(role_name);
EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' NOREPLICATION';
END LOOP;
END
$$;"#,
]; ];
let mut query = "CREATE SCHEMA IF NOT EXISTS neon_migration"; let mut query = "CREATE SCHEMA IF NOT EXISTS neon_migration";
@@ -845,13 +784,8 @@ $$;"#,
client.simple_query(query)?; client.simple_query(query)?;
while current_migration < migrations.len() { while current_migration < migrations.len() {
let migration = &migrations[current_migration]; info!("Running migration:\n{}\n", migrations[current_migration]);
if migration.is_empty() { client.simple_query(migrations[current_migration])?;
info!("Skip migration id={}", current_migration);
} else {
info!("Running migration:\n{}\n", migration);
client.simple_query(migration)?;
}
current_migration += 1; current_migration += 1;
} }
let setval = format!( let setval = format!(
@@ -867,125 +801,5 @@ $$;"#,
"Ran {} migrations", "Ran {} migrations",
(migrations.len() - starting_migration_id) (migrations.len() - starting_migration_id)
); );
Ok(())
}
/// Connect to the database as superuser and pre-create anon extension
/// if it is present in shared_preload_libraries
#[instrument(skip_all)]
pub fn handle_extension_anon(
spec: &ComputeSpec,
db_owner: &str,
db_client: &mut Client,
grants_only: bool,
) -> Result<()> {
info!("handle extension anon");
if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
if libs.contains("anon") {
if !grants_only {
// check if extension is already initialized using anon.is_initialized()
let query = "SELECT anon.is_initialized()";
match db_client.query(query, &[]) {
Ok(rows) => {
if !rows.is_empty() {
let is_initialized: bool = rows[0].get(0);
if is_initialized {
info!("anon extension is already initialized");
return Ok(());
}
}
}
Err(e) => {
warn!(
"anon extension is_installed check failed with expected error: {}",
e
);
}
};
// Create anon extension if this compute needs it
// Users cannot create it themselves, because superuser is required.
let mut query = "CREATE EXTENSION IF NOT EXISTS anon CASCADE";
info!("creating anon extension with query: {}", query);
match db_client.query(query, &[]) {
Ok(_) => {}
Err(e) => {
error!("anon extension creation failed with error: {}", e);
return Ok(());
}
}
// check that extension is installed
query = "SELECT extname FROM pg_extension WHERE extname = 'anon'";
let rows = db_client.query(query, &[])?;
if rows.is_empty() {
error!("anon extension is not installed");
return Ok(());
}
// Initialize anon extension
// This also requires superuser privileges, so users cannot do it themselves.
query = "SELECT anon.init()";
match db_client.query(query, &[]) {
Ok(_) => {}
Err(e) => {
error!("anon.init() failed with error: {}", e);
return Ok(());
}
}
}
// check that extension is installed, if not bail early
let query = "SELECT extname FROM pg_extension WHERE extname = 'anon'";
match db_client.query(query, &[]) {
Ok(rows) => {
if rows.is_empty() {
error!("anon extension is not installed");
return Ok(());
}
}
Err(e) => {
error!("anon extension check failed with error: {}", e);
return Ok(());
}
};
let query = format!("GRANT ALL ON SCHEMA anon TO {}", db_owner);
info!("granting anon extension permissions with query: {}", query);
db_client.simple_query(&query)?;
// Grant permissions to db_owner to use anon extension functions
let query = format!("GRANT ALL ON ALL FUNCTIONS IN SCHEMA anon TO {}", db_owner);
info!("granting anon extension permissions with query: {}", query);
db_client.simple_query(&query)?;
// This is needed, because some functions are defined as SECURITY DEFINER.
// In Postgres SECURITY DEFINER functions are executed with the privileges
// of the owner.
// In the anon extension this is needed to access some GUCs, which are only accessible to
// superuser. But we've patched postgres to allow db_owner to access them as well.
// So we need to change owner of these functions to db_owner.
let query = format!("
SELECT 'ALTER FUNCTION '||nsp.nspname||'.'||p.proname||'('||pg_get_function_identity_arguments(p.oid)||') OWNER TO {};'
from pg_proc p
join pg_namespace nsp ON p.pronamespace = nsp.oid
where nsp.nspname = 'anon';", db_owner);
info!("change anon extension functions owner to db owner");
db_client.simple_query(&query)?;
// affects views as well
let query = format!("GRANT ALL ON ALL TABLES IN SCHEMA anon TO {}", db_owner);
info!("granting anon extension permissions with query: {}", query);
db_client.simple_query(&query)?;
let query = format!("GRANT ALL ON ALL SEQUENCES IN SCHEMA anon TO {}", db_owner);
info!("granting anon extension permissions with query: {}", query);
db_client.simple_query(&query)?;
}
}
Ok(()) Ok(())
} }
@@ -10,9 +10,10 @@ async-trait.workspace = true
camino.workspace = true camino.workspace = true
clap.workspace = true clap.workspace = true
comfy-table.workspace = true comfy-table.workspace = true
diesel = { version = "2.1.4", features = ["postgres"]}
diesel_migrations = { version = "2.1.0", features = ["postgres"]}
futures.workspace = true futures.workspace = true
git-version.workspace = true git-version.workspace = true
humantime.workspace = true
nix.workspace = true nix.workspace = true
once_cell.workspace = true once_cell.workspace = true
postgres.workspace = true postgres.workspace = true
@@ -1,26 +0,0 @@
# Control Plane and Neon Local
This crate contains tools to start a Neon development environment locally. This utility can be used with the `cargo neon` command.
## Example: Start with Postgres 16
To create and start a local development environment with Postgres 16, you will need to provide the `--pg-version` flag to 3 of the start-up commands.
```shell
cargo neon init --pg-version 16
cargo neon start
cargo neon tenant create --set-default --pg-version 16
cargo neon endpoint create main --pg-version 16
cargo neon endpoint start main
```
## Example: Create Test User and Database
By default, `cargo neon` starts an endpoint with the `cloud_admin` role and the `postgres` database. If you want a role and a database similar to what we have in the cloud service, you can create them with the following commands when starting an endpoint.
```shell
cargo neon endpoint create main --pg-version 16 --update-catalog true
cargo neon endpoint start main --create-test-user true
```
The first command creates `neon_superuser` and the other necessary roles. The second command creates the `test` user and the `neondb` database. After running the second command, you will see a connection string that connects you as the test user.
@@ -4,45 +4,28 @@ version = "0.1.0"
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
[[bin]]
name = "storage_controller"
path = "src/main.rs"
[features]
default = []
# Enables test-only APIs and behaviors
testing = []
[dependencies] [dependencies]
anyhow.workspace = true anyhow.workspace = true
aws-config.workspace = true
bytes.workspace = true
camino.workspace = true camino.workspace = true
clap.workspace = true clap.workspace = true
fail.workspace = true
futures.workspace = true futures.workspace = true
git-version.workspace = true git-version.workspace = true
hex.workspace = true
hyper.workspace = true hyper.workspace = true
humantime.workspace = true
lasso.workspace = true
once_cell.workspace = true
pageserver_api.workspace = true pageserver_api.workspace = true
pageserver_client.workspace = true pageserver_client.workspace = true
postgres_connection.workspace = true postgres_connection.workspace = true
reqwest.workspace = true
routerify.workspace = true
serde.workspace = true serde.workspace = true
serde_json.workspace = true serde_json.workspace = true
thiserror.workspace = true thiserror.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-util.workspace = true tokio-util.workspace = true
tracing.workspace = true tracing.workspace = true
measured.workspace = true
diesel = { version = "2.1.4", features = ["serde_json", "postgres", "r2d2"] } # TODO: remove this after DB persistence is added, it is only used for
diesel_migrations = { version = "2.1.0" } # a parsing function when loading pageservers from neon_local LocalEnv
r2d2 = { version = "0.8.10" } postgres_backend.workspace = true
diesel = { version = "2.1.4", features = ["serde_json", "postgres"] }
utils = { path = "../../libs/utils/" } utils = { path = "../../libs/utils/" }
metrics = { path = "../../libs/metrics/" } metrics = { path = "../../libs/metrics/" }
@@ -7,7 +7,6 @@ CREATE TABLE tenant_shards (
generation INTEGER NOT NULL, generation INTEGER NOT NULL,
generation_pageserver BIGINT NOT NULL, generation_pageserver BIGINT NOT NULL,
placement_policy VARCHAR NOT NULL, placement_policy VARCHAR NOT NULL,
splitting SMALLINT NOT NULL,
-- config is JSON encoded, opaque to the database. -- config is JSON encoded, opaque to the database.
config TEXT NOT NULL config TEXT NOT NULL
); );
@@ -1,2 +0,0 @@
ALTER TABLE tenant_shards ALTER generation SET NOT NULL;
ALTER TABLE tenant_shards ALTER generation_pageserver SET NOT NULL;
@@ -1,4 +0,0 @@
ALTER TABLE tenant_shards ALTER generation DROP NOT NULL;
ALTER TABLE tenant_shards ALTER generation_pageserver DROP NOT NULL;
@@ -1,3 +0,0 @@
UPDATE tenant_shards set placement_policy='{"Double": 1}' where placement_policy='{"Attached": 1}';
UPDATE tenant_shards set placement_policy='"Single"' where placement_policy='{"Attached": 0}';
@@ -1,3 +0,0 @@
UPDATE tenant_shards set placement_policy='{"Attached": 1}' where placement_policy='{"Double": 1}';
UPDATE tenant_shards set placement_policy='{"Attached": 0}' where placement_policy='"Single"';
@@ -1,9 +0,0 @@
use utils::auth::{AuthError, Claims, Scope};
pub fn check_permission(claims: &Claims, required_scope: Scope) -> Result<(), AuthError> {
if claims.scope != required_scope {
return Err(AuthError("Scope mismatch. Permission denied".into()));
}
Ok(())
}
@@ -1,164 +1,72 @@
use std::{collections::HashMap, time::Duration}; use std::collections::HashMap;
use control_plane::endpoint::{ComputeControlPlane, EndpointStatus}; use control_plane::endpoint::ComputeControlPlane;
use control_plane::local_env::LocalEnv; use control_plane::local_env::LocalEnv;
use hyper::{Method, StatusCode}; use pageserver_api::shard::{ShardCount, ShardIndex, TenantShardId};
use pageserver_api::shard::{ShardCount, ShardNumber, ShardStripeSize, TenantShardId};
use postgres_connection::parse_host_port; use postgres_connection::parse_host_port;
use serde::{Deserialize, Serialize}; use utils::id::{NodeId, TenantId};
use tokio_util::sync::CancellationToken;
use utils::{
backoff::{self},
id::{NodeId, TenantId},
};
use crate::service::Config; pub(super) struct ComputeHookTenant {
shards: Vec<(ShardIndex, NodeId)>,
const BUSY_DELAY: Duration = Duration::from_secs(1);
const SLOWDOWN_DELAY: Duration = Duration::from_secs(5);
pub(crate) const API_CONCURRENCY: usize = 32;
struct ShardedComputeHookTenant {
stripe_size: ShardStripeSize,
shard_count: ShardCount,
shards: Vec<(ShardNumber, NodeId)>,
}
enum ComputeHookTenant {
Unsharded(NodeId),
Sharded(ShardedComputeHookTenant),
} }
impl ComputeHookTenant { impl ComputeHookTenant {
/// Construct with at least one shard's information pub(super) async fn maybe_reconfigure(&mut self, tenant_id: TenantId) -> anyhow::Result<()> {
fn new(tenant_shard_id: TenantShardId, stripe_size: ShardStripeSize, node_id: NodeId) -> Self { // Find the highest shard count and drop any shards that aren't
if tenant_shard_id.shard_count.count() > 1 { // for that shard count.
Self::Sharded(ShardedComputeHookTenant { let shard_count = self.shards.iter().map(|(k, _v)| k.shard_count).max();
shards: vec![(tenant_shard_id.shard_number, node_id)], let Some(shard_count) = shard_count else {
stripe_size, // No shards, nothing to do.
shard_count: tenant_shard_id.shard_count, tracing::info!("ComputeHookTenant::maybe_reconfigure: no shards");
}) return Ok(());
} else { };
Self::Unsharded(node_id)
}
}
/// Set one shard's location. If stripe size or shard count have changed, Self is reset self.shards.retain(|(k, _v)| k.shard_count == shard_count);
/// and drops existing content. self.shards
fn update( .sort_by_key(|(shard, _node_id)| shard.shard_number);
&mut self,
tenant_shard_id: TenantShardId, if self.shards.len() == shard_count.0 as usize || shard_count == ShardCount(0) {
stripe_size: ShardStripeSize, // We have pageservers for all the shards: proceed to reconfigure compute
node_id: NodeId, let env = match LocalEnv::load_config() {
) { Ok(e) => e,
match self { Err(e) => {
Self::Unsharded(existing_node_id) if tenant_shard_id.shard_count.count() == 1 => { tracing::warn!(
*existing_node_id = node_id "Couldn't load neon_local config, skipping compute update ({e})"
} );
Self::Sharded(sharded_tenant) return Ok(());
if sharded_tenant.stripe_size == stripe_size }
&& sharded_tenant.shard_count == tenant_shard_id.shard_count => };
{ let cplane = ComputeControlPlane::load(env.clone())
if let Some(existing) = sharded_tenant .expect("Error loading compute control plane");
.shards
.iter() let compute_pageservers = self
.position(|s| s.0 == tenant_shard_id.shard_number) .shards
{ .iter()
sharded_tenant.shards.get_mut(existing).unwrap().1 = node_id; .map(|(_shard, node_id)| {
} else { let ps_conf = env
sharded_tenant .get_pageserver_conf(*node_id)
.shards .expect("Unknown pageserver");
.push((tenant_shard_id.shard_number, node_id)); let (pg_host, pg_port) = parse_host_port(&ps_conf.listen_pg_addr)
sharded_tenant.shards.sort_by_key(|s| s.0) .expect("Unable to parse listen_pg_addr");
(pg_host, pg_port.unwrap_or(5432))
})
.collect::<Vec<_>>();
for (endpoint_name, endpoint) in &cplane.endpoints {
if endpoint.tenant_id == tenant_id && endpoint.status() == "running" {
tracing::info!("🔁 Reconfiguring endpoint {}", endpoint_name,);
endpoint.reconfigure(compute_pageservers.clone()).await?;
} }
} }
_ => { } else {
// Shard count changed: reset struct. tracing::info!(
*self = Self::new(tenant_shard_id, stripe_size, node_id); "ComputeHookTenant::maybe_reconfigure: not enough shards ({}/{})",
} self.shards.len(),
shard_count.0
);
} }
}
}
#[derive(Serialize, Deserialize, Debug)] Ok(())
struct ComputeHookNotifyRequestShard {
node_id: NodeId,
shard_number: ShardNumber,
}
/// Request body that we send to the control plane to notify it of where a tenant is attached
#[derive(Serialize, Deserialize, Debug)]
struct ComputeHookNotifyRequest {
tenant_id: TenantId,
stripe_size: Option<ShardStripeSize>,
shards: Vec<ComputeHookNotifyRequestShard>,
}
/// Error type for attempts to call into the control plane compute notification hook
#[derive(thiserror::Error, Debug)]
pub(crate) enum NotifyError {
// Request was not send successfully, e.g. transport error
#[error("Sending request: {0}")]
Request(#[from] reqwest::Error),
// Request could not be serviced right now due to ongoing Operation in control plane, but should be possible soon.
#[error("Control plane tenant busy")]
Busy,
// Explicit 429 response asking us to retry less frequently
#[error("Control plane overloaded")]
SlowDown,
// A 503 response indicates the control plane can't handle the request right now
#[error("Control plane unavailable (status {0})")]
Unavailable(StatusCode),
// API returned unexpected non-success status. We will retry, but log a warning.
#[error("Control plane returned unexpected status {0}")]
Unexpected(StatusCode),
// We shutdown while sending
#[error("Shutting down")]
ShuttingDown,
// A response indicates we will never succeed, such as 400 or 404
#[error("Non-retryable error {0}")]
Fatal(StatusCode),
}
impl ComputeHookTenant {
fn maybe_reconfigure(&self, tenant_id: TenantId) -> Option<ComputeHookNotifyRequest> {
match self {
Self::Unsharded(node_id) => Some(ComputeHookNotifyRequest {
tenant_id,
shards: vec![ComputeHookNotifyRequestShard {
shard_number: ShardNumber(0),
node_id: *node_id,
}],
stripe_size: None,
}),
Self::Sharded(sharded_tenant)
if sharded_tenant.shards.len() == sharded_tenant.shard_count.count() as usize =>
{
Some(ComputeHookNotifyRequest {
tenant_id,
shards: sharded_tenant
.shards
.iter()
.map(|(shard_number, node_id)| ComputeHookNotifyRequestShard {
shard_number: *shard_number,
node_id: *node_id,
})
.collect(),
stripe_size: Some(sharded_tenant.stripe_size),
})
}
Self::Sharded(sharded_tenant) => {
// Sharded tenant doesn't yet have information for all its shards
tracing::info!(
"ComputeHookTenant::maybe_reconfigure: not enough shards ({}/{})",
sharded_tenant.shards.len(),
sharded_tenant.shard_count.count()
);
None
}
}
} }
} }
@@ -166,297 +74,43 @@ impl ComputeHookTenant {
/// mapping. It aggregates updates for the shards in a tenant, and when appropriate reconfigures /// mapping. It aggregates updates for the shards in a tenant, and when appropriate reconfigures
/// the compute connection string. /// the compute connection string.
pub(super) struct ComputeHook { pub(super) struct ComputeHook {
config: Config,
state: tokio::sync::Mutex<HashMap<TenantId, ComputeHookTenant>>, state: tokio::sync::Mutex<HashMap<TenantId, ComputeHookTenant>>,
authorization_header: Option<String>,
} }
impl ComputeHook { impl ComputeHook {
pub(super) fn new(config: Config) -> Self { pub(super) fn new() -> Self {
let authorization_header = config
.control_plane_jwt_token
.clone()
.map(|jwt| format!("Bearer {}", jwt));
Self { Self {
state: Default::default(), state: Default::default(),
config,
authorization_header,
} }
} }
/// For test environments: use neon_local's LocalEnv to update compute
async fn do_notify_local(
&self,
reconfigure_request: ComputeHookNotifyRequest,
) -> anyhow::Result<()> {
let env = match LocalEnv::load_config() {
Ok(e) => e,
Err(e) => {
tracing::warn!("Couldn't load neon_local config, skipping compute update ({e})");
return Ok(());
}
};
let cplane =
ComputeControlPlane::load(env.clone()).expect("Error loading compute control plane");
let ComputeHookNotifyRequest {
tenant_id,
shards,
stripe_size,
} = reconfigure_request;
let compute_pageservers = shards
.into_iter()
.map(|shard| {
let ps_conf = env
.get_pageserver_conf(shard.node_id)
.expect("Unknown pageserver");
let (pg_host, pg_port) = parse_host_port(&ps_conf.listen_pg_addr)
.expect("Unable to parse listen_pg_addr");
(pg_host, pg_port.unwrap_or(5432))
})
.collect::<Vec<_>>();
for (endpoint_name, endpoint) in &cplane.endpoints {
if endpoint.tenant_id == tenant_id && endpoint.status() == EndpointStatus::Running {
tracing::info!("Reconfiguring endpoint {}", endpoint_name,);
endpoint
.reconfigure(compute_pageservers.clone(), stripe_size)
.await?;
}
}
Ok(())
}
async fn do_notify_iteration(
&self,
client: &reqwest::Client,
url: &String,
reconfigure_request: &ComputeHookNotifyRequest,
cancel: &CancellationToken,
) -> Result<(), NotifyError> {
let req = client.request(Method::PUT, url);
let req = if let Some(value) = &self.authorization_header {
req.header(reqwest::header::AUTHORIZATION, value)
} else {
req
};
tracing::info!(
"Sending notify request to {} ({:?})",
url,
reconfigure_request
);
let send_result = req.json(&reconfigure_request).send().await;
let response = match send_result {
Ok(r) => r,
Err(e) => return Err(e.into()),
};
// Treat all 2xx responses as success
if response.status() >= StatusCode::OK && response.status() < StatusCode::MULTIPLE_CHOICES {
if response.status() != StatusCode::OK {
// Non-200 2xx response: it doesn't make sense to retry, but this is unexpected, so
// log a warning.
tracing::warn!(
"Unexpected 2xx response code {} from control plane",
response.status()
);
}
return Ok(());
}
// Error response codes
match response.status() {
StatusCode::TOO_MANY_REQUESTS => {
// TODO: 429 handling should be global: set some state visible to other requests
// so that they will delay before starting, rather than all notifications trying
// once before backing off.
tokio::time::timeout(SLOWDOWN_DELAY, cancel.cancelled())
.await
.ok();
Err(NotifyError::SlowDown)
}
StatusCode::LOCKED => {
// Delay our retry if busy: the usual fast exponential backoff in backoff::retry
// is not appropriate
tokio::time::timeout(BUSY_DELAY, cancel.cancelled())
.await
.ok();
Err(NotifyError::Busy)
}
StatusCode::SERVICE_UNAVAILABLE
| StatusCode::GATEWAY_TIMEOUT
| StatusCode::BAD_GATEWAY => Err(NotifyError::Unavailable(response.status())),
StatusCode::BAD_REQUEST | StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => {
Err(NotifyError::Fatal(response.status()))
}
_ => Err(NotifyError::Unexpected(response.status())),
}
}
async fn do_notify(
&self,
url: &String,
reconfigure_request: ComputeHookNotifyRequest,
cancel: &CancellationToken,
) -> Result<(), NotifyError> {
let client = reqwest::Client::new();
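        // Note (added comment): backoff::retry yields None if we are cancelled before an attempt
        // completes; map that to ShuttingDown, then flatten the Result of the final attempt.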
backoff::retry(
|| self.do_notify_iteration(&client, url, &reconfigure_request, cancel),
|e| matches!(e, NotifyError::Fatal(_) | NotifyError::Unexpected(_)),
3,
10,
"Send compute notification",
cancel,
)
.await
.ok_or_else(|| NotifyError::ShuttingDown)
.and_then(|x| x)
}
/// Call this to notify the compute (postgres) tier of new pageservers to use
/// for a tenant. notify() is called by each shard individually, and this function
/// will decide whether an update to the tenant is sent. An update is sent on the
/// condition that:
/// - We know a pageserver for every shard.
/// - All the shards have the same shard_count (i.e. we are not mid-split)
///
/// Cancellation token enables callers to drop out, e.g. if calling from a Reconciler
/// that is cancelled.
///
/// This function is fallible, including in the case that the control plane is transiently
    /// unavailable. A limited number of retries are performed internally to hide short unavailability
    /// periods, but we don't retry forever. The **caller** is responsible for handling failures and
    /// calling again, so that the compute is eventually notified of the proper pageserver nodes for
    /// a tenant.
#[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), node_id))]
pub(super) async fn notify( pub(super) async fn notify(
&self, &self,
tenant_shard_id: TenantShardId, tenant_shard_id: TenantShardId,
node_id: NodeId, node_id: NodeId,
stripe_size: ShardStripeSize, ) -> anyhow::Result<()> {
cancel: &CancellationToken, tracing::info!("ComputeHook::notify: {}->{}", tenant_shard_id, node_id);
) -> Result<(), NotifyError> {
let mut locked = self.state.lock().await; let mut locked = self.state.lock().await;
let entry = locked
.entry(tenant_shard_id.tenant_id)
.or_insert_with(|| ComputeHookTenant { shards: Vec::new() });
use std::collections::hash_map::Entry; let shard_index = ShardIndex {
let tenant = match locked.entry(tenant_shard_id.tenant_id) { shard_count: tenant_shard_id.shard_count,
Entry::Vacant(e) => e.insert(ComputeHookTenant::new( shard_number: tenant_shard_id.shard_number,
tenant_shard_id, };
stripe_size,
node_id, let mut set = false;
)), for (existing_shard, existing_node) in &mut entry.shards {
Entry::Occupied(e) => { if *existing_shard == shard_index {
let tenant = e.into_mut(); *existing_node = node_id;
tenant.update(tenant_shard_id, stripe_size, node_id); set = true;
tenant
} }
};
let reconfigure_request = tenant.maybe_reconfigure(tenant_shard_id.tenant_id);
let Some(reconfigure_request) = reconfigure_request else {
// The tenant doesn't yet have pageservers for all its shards: we won't notify anything
// until it does.
tracing::info!("Tenant isn't yet ready to emit a notification");
return Ok(());
};
if let Some(notify_url) = &self.config.compute_hook_url {
self.do_notify(notify_url, reconfigure_request, cancel)
.await
} else {
self.do_notify_local(reconfigure_request)
.await
.map_err(|e| {
// This path is for testing only, so munge the error into our prod-style error type.
tracing::error!("Local notification hook failed: {e}");
NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR)
})
} }
} if !set {
} entry.shards.push((shard_index, node_id));
}
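The doc comment on notify() above leaves long-term retrying to the caller. Below is a minimal caller-side sketch, not part of this diff and not compile-checked against the crate: it assumes the crate's ComputeHook, NotifyError, TenantShardId, ShardStripeSize, NodeId and tokio_util's CancellationToken are in scope, and treats everything except Fatal and ShuttingDown as retryable, mirroring the variant comments on NotifyError.

// Hypothetical caller-side retry loop around ComputeHook::notify(). Fatal statuses and
// shutdown are surfaced to the caller; all other errors are treated as transient.
async fn notify_until_done(
    hook: &ComputeHook,
    tenant_shard_id: TenantShardId,
    node_id: NodeId,
    stripe_size: ShardStripeSize,
    cancel: &CancellationToken,
) -> Result<(), NotifyError> {
    loop {
        match hook.notify(tenant_shard_id, node_id, stripe_size, cancel).await {
            Ok(()) => return Ok(()),
            Err(e @ (NotifyError::Fatal(_) | NotifyError::ShuttingDown)) => return Err(e),
            Err(e) => {
                // Busy/SlowDown/Unavailable/Unexpected/Request: back off and try again later.
                tracing::info!("Compute notification failed, will retry: {e}");
                tokio::time::sleep(std::time::Duration::from_secs(5)).await;
                if cancel.is_cancelled() {
                    return Err(NotifyError::ShuttingDown);
                }
            }
        }
    }
}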
#[cfg(test)]
pub(crate) mod tests { entry.maybe_reconfigure(tenant_shard_id.tenant_id).await
use pageserver_api::shard::{ShardCount, ShardNumber};
use utils::id::TenantId;
use super::*;
#[test]
fn tenant_updates() -> anyhow::Result<()> {
let tenant_id = TenantId::generate();
let mut tenant_state = ComputeHookTenant::new(
TenantShardId {
tenant_id,
shard_count: ShardCount::new(0),
shard_number: ShardNumber(0),
},
ShardStripeSize(12345),
NodeId(1),
);
// An unsharded tenant is always ready to emit a notification
assert!(tenant_state.maybe_reconfigure(tenant_id).is_some());
assert_eq!(
tenant_state
.maybe_reconfigure(tenant_id)
.unwrap()
.shards
.len(),
1
);
assert!(tenant_state
.maybe_reconfigure(tenant_id)
.unwrap()
.stripe_size
.is_none());
// Writing the first shard of a multi-sharded situation (i.e. in a split)
        // resets the tenant state and puts it in a non-notifying state (need to
// see all shards)
tenant_state.update(
TenantShardId {
tenant_id,
shard_count: ShardCount::new(2),
shard_number: ShardNumber(1),
},
ShardStripeSize(32768),
NodeId(1),
);
assert!(tenant_state.maybe_reconfigure(tenant_id).is_none());
// Writing the second shard makes it ready to notify
tenant_state.update(
TenantShardId {
tenant_id,
shard_count: ShardCount::new(2),
shard_number: ShardNumber(0),
},
ShardStripeSize(32768),
NodeId(1),
);
assert!(tenant_state.maybe_reconfigure(tenant_id).is_some());
assert_eq!(
tenant_state
.maybe_reconfigure(tenant_id)
.unwrap()
.shards
.len(),
2
);
assert_eq!(
tenant_state
.maybe_reconfigure(tenant_id)
.unwrap()
.stripe_size,
Some(ShardStripeSize(32768))
);
Ok(())
} }
} }

@@ -1,227 +0,0 @@
use futures::{stream::FuturesUnordered, StreamExt};
use std::{
collections::HashMap,
sync::Arc,
time::{Duration, Instant},
};
use tokio_util::sync::CancellationToken;
use pageserver_api::{
controller_api::{NodeAvailability, UtilizationScore},
models::PageserverUtilization,
};
use thiserror::Error;
use utils::id::NodeId;
use crate::node::Node;
struct HeartbeaterTask {
receiver: tokio::sync::mpsc::UnboundedReceiver<HeartbeatRequest>,
cancel: CancellationToken,
state: HashMap<NodeId, PageserverState>,
max_unavailable_interval: Duration,
jwt_token: Option<String>,
}
#[derive(Debug, Clone)]
pub(crate) enum PageserverState {
Available {
last_seen_at: Instant,
utilization: PageserverUtilization,
},
Offline,
}
#[derive(Debug)]
pub(crate) struct AvailablityDeltas(pub Vec<(NodeId, PageserverState)>);
#[derive(Debug, Error)]
pub(crate) enum HeartbeaterError {
#[error("Cancelled")]
Cancel,
}
struct HeartbeatRequest {
pageservers: Arc<HashMap<NodeId, Node>>,
reply: tokio::sync::oneshot::Sender<Result<AvailablityDeltas, HeartbeaterError>>,
}
pub(crate) struct Heartbeater {
sender: tokio::sync::mpsc::UnboundedSender<HeartbeatRequest>,
}
impl Heartbeater {
pub(crate) fn new(
jwt_token: Option<String>,
max_unavailable_interval: Duration,
cancel: CancellationToken,
) -> Self {
let (sender, receiver) = tokio::sync::mpsc::unbounded_channel::<HeartbeatRequest>();
let mut heartbeater =
HeartbeaterTask::new(receiver, jwt_token, max_unavailable_interval, cancel);
tokio::task::spawn(async move { heartbeater.run().await });
Self { sender }
}
pub(crate) async fn heartbeat(
&self,
pageservers: Arc<HashMap<NodeId, Node>>,
) -> Result<AvailablityDeltas, HeartbeaterError> {
let (sender, receiver) = tokio::sync::oneshot::channel();
self.sender
.send(HeartbeatRequest {
pageservers,
reply: sender,
})
.unwrap();
receiver.await.unwrap()
}
}
impl HeartbeaterTask {
fn new(
receiver: tokio::sync::mpsc::UnboundedReceiver<HeartbeatRequest>,
jwt_token: Option<String>,
max_unavailable_interval: Duration,
cancel: CancellationToken,
) -> Self {
Self {
receiver,
cancel,
state: HashMap::new(),
max_unavailable_interval,
jwt_token,
}
}
async fn run(&mut self) {
loop {
tokio::select! {
request = self.receiver.recv() => {
match request {
Some(req) => {
let res = self.heartbeat(req.pageservers).await;
req.reply.send(res).unwrap();
},
None => { return; }
}
},
_ = self.cancel.cancelled() => return
}
}
}
async fn heartbeat(
&mut self,
pageservers: Arc<HashMap<NodeId, Node>>,
) -> Result<AvailablityDeltas, HeartbeaterError> {
let mut new_state = HashMap::new();
let mut heartbeat_futs = FuturesUnordered::new();
for (node_id, node) in &*pageservers {
heartbeat_futs.push({
let jwt_token = self.jwt_token.clone();
let cancel = self.cancel.clone();
// Clone the node and mark it as available such that the request
// goes through to the pageserver even when the node is marked offline.
// This doesn't impact the availability observed by [`crate::service::Service`].
let mut node = node.clone();
node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
async move {
let response = node
.with_client_retries(
|client| async move { client.get_utilization().await },
&jwt_token,
3,
3,
Duration::from_secs(1),
&cancel,
)
.await;
let response = match response {
Some(r) => r,
None => {
// This indicates cancellation of the request.
// We ignore the node in this case.
return None;
}
};
let status = if let Ok(utilization) = response {
PageserverState::Available {
last_seen_at: Instant::now(),
utilization,
}
} else {
PageserverState::Offline
};
Some((*node_id, status))
}
});
loop {
let maybe_status = tokio::select! {
next = heartbeat_futs.next() => {
match next {
Some(result) => result,
None => { break; }
}
},
_ = self.cancel.cancelled() => { return Err(HeartbeaterError::Cancel); }
};
if let Some((node_id, status)) = maybe_status {
new_state.insert(node_id, status);
}
}
}
let mut deltas = Vec::new();
let now = Instant::now();
for (node_id, ps_state) in new_state {
use std::collections::hash_map::Entry::*;
let entry = self.state.entry(node_id);
let mut needs_update = false;
match entry {
Occupied(ref occ) => match (occ.get(), &ps_state) {
(PageserverState::Offline, PageserverState::Offline) => {}
(PageserverState::Available { last_seen_at, .. }, PageserverState::Offline) => {
if now - *last_seen_at >= self.max_unavailable_interval {
deltas.push((node_id, ps_state.clone()));
needs_update = true;
}
}
_ => {
deltas.push((node_id, ps_state.clone()));
needs_update = true;
}
},
Vacant(_) => {
deltas.push((node_id, ps_state.clone()));
}
}
match entry {
Occupied(mut occ) if needs_update => {
(*occ.get_mut()) = ps_state;
}
Vacant(vac) => {
vac.insert(ps_state);
}
_ => {}
}
}
Ok(AvailablityDeltas(deltas))
}
}
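A minimal, hypothetical driver for the Heartbeater above (the polling interval and logging are illustrative; Node construction and the node map are assumed to come from elsewhere in the crate):

// Hypothetical: periodically heartbeat all known pageservers and log availability deltas.
async fn heartbeat_loop(nodes: Arc<HashMap<NodeId, Node>>, cancel: CancellationToken) {
    let heartbeater = Heartbeater::new(None, Duration::from_secs(30), cancel.clone());
    while !cancel.is_cancelled() {
        match heartbeater.heartbeat(nodes.clone()).await {
            Ok(AvailablityDeltas(deltas)) => {
                for (node_id, state) in deltas {
                    tracing::info!("Pageserver {node_id} changed state: {state:?}");
                }
            }
            Err(HeartbeaterError::Cancel) => break,
        }
        tokio::time::sleep(Duration::from_secs(10)).await;
    }
}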

@@ -1,27 +1,14 @@
use crate::metrics::{
HttpRequestLatencyLabelGroup, HttpRequestStatusLabelGroup, PageserverRequestLabelGroup,
METRICS_REGISTRY,
};
use crate::reconciler::ReconcileError; use crate::reconciler::ReconcileError;
use crate::service::{Service, STARTUP_RECONCILE_TIMEOUT}; use crate::service::{Service, STARTUP_RECONCILE_TIMEOUT};
use futures::Future;
use hyper::header::CONTENT_TYPE;
use hyper::{Body, Request, Response}; use hyper::{Body, Request, Response};
use hyper::{StatusCode, Uri}; use hyper::{StatusCode, Uri};
use pageserver_api::models::{ use pageserver_api::models::{TenantCreateRequest, TimelineCreateRequest};
TenantConfigRequest, TenantCreateRequest, TenantLocationConfigRequest, TenantShardSplitRequest,
TenantTimeTravelRequest, TimelineCreateRequest,
};
use pageserver_api::shard::TenantShardId; use pageserver_api::shard::TenantShardId;
use pageserver_client::mgmt_api;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use utils::auth::SwappableJwtAuth;
use tokio_util::sync::CancellationToken; use utils::http::endpoint::{auth_middleware, request_span};
use utils::auth::{Scope, SwappableJwtAuth}; use utils::http::request::parse_request_param;
use utils::failpoint_support::failpoints_handler; use utils::id::TenantId;
use utils::http::endpoint::{auth_middleware, check_permission_with, request_span};
use utils::http::request::{must_get_query_param, parse_query_param, parse_request_param};
use utils::id::{TenantId, TimelineId};
use utils::{ use utils::{
http::{ http::{
@@ -33,14 +20,12 @@ use utils::{
id::NodeId, id::NodeId,
}; };
use pageserver_api::controller_api::{ use pageserver_api::control_api::{ReAttachRequest, ValidateRequest};
NodeAvailability, NodeConfigureRequest, NodeRegisterRequest, TenantShardMigrateRequest,
use control_plane::attachment_service::{
AttachHookRequest, InspectRequest, NodeConfigureRequest, NodeRegisterRequest,
TenantShardMigrateRequest,
}; };
use pageserver_api::upcall_api::{ReAttachRequest, ValidateRequest};
use control_plane::storage_controller::{AttachHookRequest, InspectRequest};
use routerify::Middleware;
/// State available to HTTP request handlers /// State available to HTTP request handlers
#[derive(Clone)] #[derive(Clone)]
@@ -52,7 +37,7 @@ pub struct HttpState {
impl HttpState { impl HttpState {
pub fn new(service: Arc<crate::service::Service>, auth: Option<Arc<SwappableJwtAuth>>) -> Self { pub fn new(service: Arc<crate::service::Service>, auth: Option<Arc<SwappableJwtAuth>>) -> Self {
let allowlist_routes = ["/status", "/ready", "/metrics"] let allowlist_routes = ["/status"]
.iter() .iter()
.map(|v| v.parse().unwrap()) .map(|v| v.parse().unwrap())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
@@ -74,18 +59,21 @@ fn get_state(request: &Request<Body>) -> &HttpState {
/// Pageserver calls into this on startup, to learn which tenants it should attach /// Pageserver calls into this on startup, to learn which tenants it should attach
async fn handle_re_attach(mut req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_re_attach(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::GenerationsApi)?;
let reattach_req = json_request::<ReAttachRequest>(&mut req).await?; let reattach_req = json_request::<ReAttachRequest>(&mut req).await?;
let state = get_state(&req); let state = get_state(&req);
json_response(StatusCode::OK, state.service.re_attach(reattach_req).await?) json_response(
StatusCode::OK,
state
.service
.re_attach(reattach_req)
.await
.map_err(ApiError::InternalServerError)?,
)
} }
/// Pageserver calls into this before doing deletions, to confirm that it still /// Pageserver calls into this before doing deletions, to confirm that it still
/// holds the latest generation for the tenants with deletions enqueued /// holds the latest generation for the tenants with deletions enqueued
async fn handle_validate(mut req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_validate(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::GenerationsApi)?;
let validate_req = json_request::<ValidateRequest>(&mut req).await?; let validate_req = json_request::<ValidateRequest>(&mut req).await?;
let state = get_state(&req); let state = get_state(&req);
json_response(StatusCode::OK, state.service.validate(validate_req)) json_response(StatusCode::OK, state.service.validate(validate_req))
@@ -95,8 +83,6 @@ async fn handle_validate(mut req: Request<Body>) -> Result<Response<Body>, ApiEr
/// (in the real control plane this is unnecessary, because the same program is managing /// (in the real control plane this is unnecessary, because the same program is managing
/// generation numbers and doing attachments). /// generation numbers and doing attachments).
async fn handle_attach_hook(mut req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_attach_hook(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let attach_req = json_request::<AttachHookRequest>(&mut req).await?; let attach_req = json_request::<AttachHookRequest>(&mut req).await?;
let state = get_state(&req); let state = get_state(&req);
@@ -111,8 +97,6 @@ async fn handle_attach_hook(mut req: Request<Body>) -> Result<Response<Body>, Ap
} }
async fn handle_inspect(mut req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_inspect(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let inspect_req = json_request::<InspectRequest>(&mut req).await?; let inspect_req = json_request::<InspectRequest>(&mut req).await?;
let state = get_state(&req); let state = get_state(&req);
@@ -124,155 +108,8 @@ async fn handle_tenant_create(
service: Arc<Service>, service: Arc<Service>,
mut req: Request<Body>, mut req: Request<Body>,
) -> Result<Response<Body>, ApiError> { ) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::PageServerApi)?;
let create_req = json_request::<TenantCreateRequest>(&mut req).await?; let create_req = json_request::<TenantCreateRequest>(&mut req).await?;
json_response(StatusCode::OK, service.tenant_create(create_req).await?)
json_response(
StatusCode::CREATED,
service.tenant_create(create_req).await?,
)
}
// For tenant and timeline deletions, which both implement an "initially return 202, then 404 once
// we're done" semantic, we wrap with a retry loop to expose a simpler API upstream. This avoids
// needing to track a "deleting" state for tenants.
async fn deletion_wrapper<R, F>(service: Arc<Service>, f: F) -> Result<Response<Body>, ApiError>
where
R: std::future::Future<Output = Result<StatusCode, ApiError>> + Send + 'static,
F: Fn(Arc<Service>) -> R + Send + Sync + 'static,
{
let started_at = Instant::now();
    // To keep deletion reasonably snappy for small tenants, initially check after 1 second whether
    // the deletion has completed.
let mut retry_period = Duration::from_secs(1);
// On subsequent retries, wait longer.
let max_retry_period = Duration::from_secs(5);
// Enable callers with a 30 second request timeout to reliably get a response
let max_wait = Duration::from_secs(25);
loop {
let status = f(service.clone()).await?;
match status {
StatusCode::ACCEPTED => {
tracing::info!("Deletion accepted, waiting to try again...");
tokio::time::sleep(retry_period).await;
retry_period = max_retry_period;
}
StatusCode::NOT_FOUND => {
tracing::info!("Deletion complete");
return json_response(StatusCode::OK, ());
}
_ => {
tracing::warn!("Unexpected status {status}");
return json_response(status, ());
}
}
let now = Instant::now();
if now + retry_period > started_at + max_wait {
tracing::info!("Deletion timed out waiting for 404");
// REQUEST_TIMEOUT would be more appropriate, but CONFLICT is already part of
// the pageserver's swagger definition for this endpoint, and has the same desired
// effect of causing the control plane to retry later.
return json_response(StatusCode::CONFLICT, ());
}
}
}
async fn handle_tenant_location_config(
service: Arc<Service>,
mut req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
let tenant_shard_id: TenantShardId = parse_request_param(&req, "tenant_shard_id")?;
check_permissions(&req, Scope::PageServerApi)?;
let config_req = json_request::<TenantLocationConfigRequest>(&mut req).await?;
json_response(
StatusCode::OK,
service
.tenant_location_config(tenant_shard_id, config_req)
.await?,
)
}
async fn handle_tenant_config_set(
service: Arc<Service>,
mut req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::PageServerApi)?;
let config_req = json_request::<TenantConfigRequest>(&mut req).await?;
json_response(StatusCode::OK, service.tenant_config_set(config_req).await?)
}
async fn handle_tenant_config_get(
service: Arc<Service>,
req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
check_permissions(&req, Scope::PageServerApi)?;
json_response(StatusCode::OK, service.tenant_config_get(tenant_id)?)
}
async fn handle_tenant_time_travel_remote_storage(
service: Arc<Service>,
mut req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
check_permissions(&req, Scope::PageServerApi)?;
let time_travel_req = json_request::<TenantTimeTravelRequest>(&mut req).await?;
let timestamp_raw = must_get_query_param(&req, "travel_to")?;
let _timestamp = humantime::parse_rfc3339(&timestamp_raw).map_err(|_e| {
ApiError::BadRequest(anyhow::anyhow!(
"Invalid time for travel_to: {timestamp_raw:?}"
))
})?;
let done_if_after_raw = must_get_query_param(&req, "done_if_after")?;
let _done_if_after = humantime::parse_rfc3339(&done_if_after_raw).map_err(|_e| {
ApiError::BadRequest(anyhow::anyhow!(
"Invalid time for done_if_after: {done_if_after_raw:?}"
))
})?;
service
.tenant_time_travel_remote_storage(
&time_travel_req,
tenant_id,
timestamp_raw,
done_if_after_raw,
)
.await?;
json_response(StatusCode::OK, ())
}
async fn handle_tenant_secondary_download(
service: Arc<Service>,
req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
let wait = parse_query_param(&req, "wait_ms")?.map(Duration::from_millis);
let (status, progress) = service.tenant_secondary_download(tenant_id, wait).await?;
json_response(status, progress)
}
async fn handle_tenant_delete(
service: Arc<Service>,
req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
check_permissions(&req, Scope::PageServerApi)?;
deletion_wrapper(service, move |service| async move {
service.tenant_delete(tenant_id).await
})
.await
} }
async fn handle_tenant_timeline_create( async fn handle_tenant_timeline_create(
@@ -280,151 +117,31 @@ async fn handle_tenant_timeline_create(
mut req: Request<Body>, mut req: Request<Body>,
) -> Result<Response<Body>, ApiError> { ) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?; let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
check_permissions(&req, Scope::PageServerApi)?;
let create_req = json_request::<TimelineCreateRequest>(&mut req).await?; let create_req = json_request::<TimelineCreateRequest>(&mut req).await?;
json_response( json_response(
StatusCode::CREATED, StatusCode::OK,
service service
.tenant_timeline_create(tenant_id, create_req) .tenant_timeline_create(tenant_id, create_req)
.await?, .await?,
) )
} }
async fn handle_tenant_timeline_delete(
service: Arc<Service>,
req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
check_permissions(&req, Scope::PageServerApi)?;
let timeline_id: TimelineId = parse_request_param(&req, "timeline_id")?;
deletion_wrapper(service, move |service| async move {
service.tenant_timeline_delete(tenant_id, timeline_id).await
})
.await
}
async fn handle_tenant_timeline_passthrough(
service: Arc<Service>,
req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
check_permissions(&req, Scope::PageServerApi)?;
let Some(path) = req.uri().path_and_query() else {
        // This should never happen: our request router only calls us if there is a path
return Err(ApiError::BadRequest(anyhow::anyhow!("Missing path")));
};
tracing::info!("Proxying request for tenant {} ({})", tenant_id, path);
// Find the node that holds shard zero
let (node, tenant_shard_id) = service.tenant_shard0_node(tenant_id)?;
// Callers will always pass an unsharded tenant ID. Before proxying, we must
// rewrite this to a shard-aware shard zero ID.
let path = format!("{}", path);
let tenant_str = tenant_id.to_string();
let tenant_shard_str = format!("{}", tenant_shard_id);
let path = path.replace(&tenant_str, &tenant_shard_str);
let latency = &METRICS_REGISTRY
.metrics_group
.storage_controller_passthrough_request_latency;
// This is a bit awkward. We remove the param from the request
// and join the words by '_' to get a label for the request.
let just_path = path.replace(&tenant_shard_str, "");
let path_label = just_path
.split('/')
.filter(|token| !token.is_empty())
.collect::<Vec<_>>()
.join("_");
let labels = PageserverRequestLabelGroup {
pageserver_id: &node.get_id().to_string(),
path: &path_label,
method: crate::metrics::Method::Get,
};
let _timer = latency.start_timer(labels.clone());
let client = mgmt_api::Client::new(node.base_url(), service.get_config().jwt_token.as_deref());
let resp = client.get_raw(path).await.map_err(|_e|
        // FIXME: give ApiError a proper Unavailable variant. We return 503 here because
// if we can't successfully send a request to the pageserver, we aren't available.
ApiError::ShuttingDown)?;
if !resp.status().is_success() {
let error_counter = &METRICS_REGISTRY
.metrics_group
.storage_controller_passthrough_request_error;
error_counter.inc(labels);
}
    // We have a reqwest::Response, but would like an http::Response
let mut builder = hyper::Response::builder()
.status(resp.status())
.version(resp.version());
for (k, v) in resp.headers() {
builder = builder.header(k, v);
}
let response = builder
.body(Body::wrap_stream(resp.bytes_stream()))
.map_err(|e| ApiError::InternalServerError(e.into()))?;
Ok(response)
}
async fn handle_tenant_locate( async fn handle_tenant_locate(
service: Arc<Service>, service: Arc<Service>,
req: Request<Body>, req: Request<Body>,
) -> Result<Response<Body>, ApiError> { ) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?; let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
json_response(StatusCode::OK, service.tenant_locate(tenant_id)?) json_response(StatusCode::OK, service.tenant_locate(tenant_id)?)
} }
async fn handle_tenant_describe(
service: Arc<Service>,
req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
json_response(StatusCode::OK, service.tenant_describe(tenant_id)?)
}
async fn handle_node_register(mut req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_node_register(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let register_req = json_request::<NodeRegisterRequest>(&mut req).await?; let register_req = json_request::<NodeRegisterRequest>(&mut req).await?;
let state = get_state(&req); let state = get_state(&req);
state.service.node_register(register_req).await?; state.service.node_register(register_req).await?;
json_response(StatusCode::OK, ()) json_response(StatusCode::OK, ())
} }
async fn handle_node_list(req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let state = get_state(&req);
json_response(StatusCode::OK, state.service.node_list().await?)
}
async fn handle_node_drop(req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let state = get_state(&req);
let node_id: NodeId = parse_request_param(&req, "node_id")?;
json_response(StatusCode::OK, state.service.node_drop(node_id).await?)
}
async fn handle_node_configure(mut req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_node_configure(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let node_id: NodeId = parse_request_param(&req, "node_id")?; let node_id: NodeId = parse_request_param(&req, "node_id")?;
let config_req = json_request::<NodeConfigureRequest>(&mut req).await?; let config_req = json_request::<NodeConfigureRequest>(&mut req).await?;
if node_id != config_req.node_id { if node_id != config_req.node_id {
@@ -434,40 +151,13 @@ async fn handle_node_configure(mut req: Request<Body>) -> Result<Response<Body>,
} }
let state = get_state(&req); let state = get_state(&req);
json_response( json_response(StatusCode::OK, state.service.node_configure(config_req)?)
StatusCode::OK,
state
.service
.node_configure(
config_req.node_id,
config_req.availability.map(NodeAvailability::from),
config_req.scheduling,
)
.await?,
)
}
async fn handle_tenant_shard_split(
service: Arc<Service>,
mut req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
let split_req = json_request::<TenantShardSplitRequest>(&mut req).await?;
json_response(
StatusCode::OK,
service.tenant_shard_split(tenant_id, split_req).await?,
)
} }
async fn handle_tenant_shard_migrate( async fn handle_tenant_shard_migrate(
service: Arc<Service>, service: Arc<Service>,
mut req: Request<Body>, mut req: Request<Body>,
) -> Result<Response<Body>, ApiError> { ) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let tenant_shard_id: TenantShardId = parse_request_param(&req, "tenant_shard_id")?; let tenant_shard_id: TenantShardId = parse_request_param(&req, "tenant_shard_id")?;
let migrate_req = json_request::<TenantShardMigrateRequest>(&mut req).await?; let migrate_req = json_request::<TenantShardMigrateRequest>(&mut req).await?;
json_response( json_response(
@@ -478,53 +168,11 @@ async fn handle_tenant_shard_migrate(
) )
} }
async fn handle_tenant_drop(req: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
check_permissions(&req, Scope::PageServerApi)?;
let state = get_state(&req);
json_response(StatusCode::OK, state.service.tenant_drop(tenant_id).await?)
}
async fn handle_tenants_dump(req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let state = get_state(&req);
state.service.tenants_dump()
}
async fn handle_scheduler_dump(req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let state = get_state(&req);
state.service.scheduler_dump()
}
async fn handle_consistency_check(req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let state = get_state(&req);
json_response(StatusCode::OK, state.service.consistency_check().await?)
}
/// Status endpoint is just used for checking that our HTTP listener is up /// Status endpoint is just used for checking that our HTTP listener is up
async fn handle_status(_req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_status(_req: Request<Body>) -> Result<Response<Body>, ApiError> {
json_response(StatusCode::OK, ()) json_response(StatusCode::OK, ())
} }
/// Readiness endpoint indicates when we're done doing startup I/O (e.g. reconciling
/// with remote pageserver nodes). This is intended for use as a kubernetes readiness probe.
async fn handle_ready(req: Request<Body>) -> Result<Response<Body>, ApiError> {
let state = get_state(&req);
if state.service.startup_complete.is_ready() {
json_response(StatusCode::OK, ())
} else {
json_response(StatusCode::SERVICE_UNAVAILABLE, ())
}
}
impl From<ReconcileError> for ApiError { impl From<ReconcileError> for ApiError {
fn from(value: ReconcileError) -> Self { fn from(value: ReconcileError) -> Self {
ApiError::Conflict(format!("Reconciliation error: {}", value)) ApiError::Conflict(format!("Reconciliation error: {}", value))
@@ -533,11 +181,7 @@ impl From<ReconcileError> for ApiError {
/// Common wrapper for request handlers that call into Service and will operate on tenants: they must only /// Common wrapper for request handlers that call into Service and will operate on tenants: they must only
/// be allowed to run if Service has finished its initial reconciliation. /// be allowed to run if Service has finished its initial reconciliation.
async fn tenant_service_handler<R, H>( async fn tenant_service_handler<R, H>(request: Request<Body>, handler: H) -> R::Output
request: Request<Body>,
handler: H,
request_name: RequestName,
) -> R::Output
where where
R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static, R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
H: FnOnce(Arc<Service>, Request<Body>) -> R + Send + Sync + 'static, H: FnOnce(Arc<Service>, Request<Body>) -> R + Send + Sync + 'static,
@@ -557,112 +201,18 @@ where
)); ));
} }
named_request_span( request_span(
request, request,
|request| async move { handler(service, request).await }, |request| async move { handler(service, request).await },
request_name,
) )
.await .await
} }
fn check_permissions(request: &Request<Body>, required_scope: Scope) -> Result<(), ApiError> {
check_permission_with(request, |claims| {
crate::auth::check_permission(claims, required_scope)
})
}
#[derive(Clone, Debug)]
struct RequestMeta {
method: hyper::http::Method,
at: Instant,
}
fn prologue_metrics_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
) -> Middleware<B, ApiError> {
Middleware::pre(move |req| async move {
let meta = RequestMeta {
method: req.method().clone(),
at: Instant::now(),
};
req.set_context(meta);
Ok(req)
})
}
fn epilogue_metrics_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
) -> Middleware<B, ApiError> {
Middleware::post_with_info(move |resp, req_info| async move {
let request_name = match req_info.context::<RequestName>() {
Some(name) => name,
None => {
return Ok(resp);
}
};
if let Some(meta) = req_info.context::<RequestMeta>() {
let status = &crate::metrics::METRICS_REGISTRY
.metrics_group
.storage_controller_http_request_status;
let latency = &crate::metrics::METRICS_REGISTRY
.metrics_group
.storage_controller_http_request_latency;
status.inc(HttpRequestStatusLabelGroup {
path: request_name.0,
method: meta.method.clone().into(),
status: crate::metrics::StatusCode(resp.status()),
});
latency.observe(
HttpRequestLatencyLabelGroup {
path: request_name.0,
method: meta.method.into(),
},
meta.at.elapsed().as_secs_f64(),
);
}
Ok(resp)
})
}
pub async fn measured_metrics_handler(_req: Request<Body>) -> Result<Response<Body>, ApiError> {
pub const TEXT_FORMAT: &str = "text/plain; version=0.0.4";
let payload = crate::metrics::METRICS_REGISTRY.encode();
let response = Response::builder()
.status(200)
.header(CONTENT_TYPE, TEXT_FORMAT)
.body(payload.into())
.unwrap();
Ok(response)
}
#[derive(Clone)]
struct RequestName(&'static str);
async fn named_request_span<R, H>(
request: Request<Body>,
handler: H,
name: RequestName,
) -> R::Output
where
R: Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
H: FnOnce(Request<Body>) -> R + Send + Sync + 'static,
{
request.set_context(name);
request_span(request, handler).await
}
pub fn make_router( pub fn make_router(
service: Arc<Service>, service: Arc<Service>,
auth: Option<Arc<SwappableJwtAuth>>, auth: Option<Arc<SwappableJwtAuth>>,
) -> RouterBuilder<hyper::Body, ApiError> { ) -> RouterBuilder<hyper::Body, ApiError> {
let mut router = endpoint::make_router() let mut router = endpoint::make_router();
.middleware(prologue_metrics_middleware())
.middleware(epilogue_metrics_middleware());
if auth.is_some() { if auth.is_some() {
router = router.middleware(auth_middleware(|request| { router = router.middleware(auth_middleware(|request| {
let state = get_state(request); let state = get_state(request);
@@ -671,166 +221,34 @@ pub fn make_router(
} else { } else {
state.auth.as_deref() state.auth.as_deref()
} }
})); }))
} }
router router
.data(Arc::new(HttpState::new(service, auth))) .data(Arc::new(HttpState::new(service, auth)))
.get("/metrics", |r| { .get("/status", |r| request_span(r, handle_status))
named_request_span(r, measured_metrics_handler, RequestName("metrics")) .post("/re-attach", |r| request_span(r, handle_re_attach))
.post("/validate", |r| request_span(r, handle_validate))
.post("/attach-hook", |r| request_span(r, handle_attach_hook))
.post("/inspect", |r| request_span(r, handle_inspect))
.post("/node", |r| request_span(r, handle_node_register))
.put("/node/:node_id/config", |r| {
request_span(r, handle_node_configure)
}) })
// Non-prefixed generic endpoints (status, metrics)
.get("/status", |r| {
named_request_span(r, handle_status, RequestName("status"))
})
.get("/ready", |r| {
named_request_span(r, handle_ready, RequestName("ready"))
})
// Upcalls for the pageserver: point the pageserver's `control_plane_api` config to this prefix
.post("/upcall/v1/re-attach", |r| {
named_request_span(r, handle_re_attach, RequestName("upcall_v1_reattach"))
})
.post("/upcall/v1/validate", |r| {
named_request_span(r, handle_validate, RequestName("upcall_v1_validate"))
})
// Test/dev/debug endpoints
.post("/debug/v1/attach-hook", |r| {
named_request_span(r, handle_attach_hook, RequestName("debug_v1_attach_hook"))
})
.post("/debug/v1/inspect", |r| {
named_request_span(r, handle_inspect, RequestName("debug_v1_inspect"))
})
.post("/debug/v1/tenant/:tenant_id/drop", |r| {
named_request_span(r, handle_tenant_drop, RequestName("debug_v1_tenant_drop"))
})
.post("/debug/v1/node/:node_id/drop", |r| {
named_request_span(r, handle_node_drop, RequestName("debug_v1_node_drop"))
})
.get("/debug/v1/tenant", |r| {
named_request_span(r, handle_tenants_dump, RequestName("debug_v1_tenant"))
})
.get("/debug/v1/tenant/:tenant_id/locate", |r| {
tenant_service_handler(
r,
handle_tenant_locate,
RequestName("debug_v1_tenant_locate"),
)
})
.get("/debug/v1/scheduler", |r| {
named_request_span(r, handle_scheduler_dump, RequestName("debug_v1_scheduler"))
})
.post("/debug/v1/consistency_check", |r| {
named_request_span(
r,
handle_consistency_check,
RequestName("debug_v1_consistency_check"),
)
})
.put("/debug/v1/failpoints", |r| {
request_span(r, |r| failpoints_handler(r, CancellationToken::new()))
})
// Node operations
.post("/control/v1/node", |r| {
named_request_span(r, handle_node_register, RequestName("control_v1_node"))
})
.get("/control/v1/node", |r| {
named_request_span(r, handle_node_list, RequestName("control_v1_node"))
})
.put("/control/v1/node/:node_id/config", |r| {
named_request_span(
r,
handle_node_configure,
RequestName("control_v1_node_config"),
)
})
// Tenant Shard operations
.put("/control/v1/tenant/:tenant_shard_id/migrate", |r| {
tenant_service_handler(
r,
handle_tenant_shard_migrate,
RequestName("control_v1_tenant_migrate"),
)
})
.put("/control/v1/tenant/:tenant_id/shard_split", |r| {
tenant_service_handler(
r,
handle_tenant_shard_split,
RequestName("control_v1_tenant_shard_split"),
)
})
.get("/control/v1/tenant/:tenant_id", |r| {
tenant_service_handler(
r,
handle_tenant_describe,
RequestName("control_v1_tenant_describe"),
)
})
// Tenant operations
// The ^/v1/ endpoints act as a "Virtual Pageserver", enabling shard-naive clients to call into
// this service to manage tenants that actually consist of many tenant shards, as if they are a single entity.
.post("/v1/tenant", |r| { .post("/v1/tenant", |r| {
tenant_service_handler(r, handle_tenant_create, RequestName("v1_tenant")) tenant_service_handler(r, handle_tenant_create)
})
.delete("/v1/tenant/:tenant_id", |r| {
tenant_service_handler(r, handle_tenant_delete, RequestName("v1_tenant"))
})
.put("/v1/tenant/config", |r| {
tenant_service_handler(r, handle_tenant_config_set, RequestName("v1_tenant_config"))
})
.get("/v1/tenant/:tenant_id/config", |r| {
tenant_service_handler(r, handle_tenant_config_get, RequestName("v1_tenant_config"))
})
.put("/v1/tenant/:tenant_shard_id/location_config", |r| {
tenant_service_handler(
r,
handle_tenant_location_config,
RequestName("v1_tenant_location_config"),
)
})
.put("/v1/tenant/:tenant_id/time_travel_remote_storage", |r| {
tenant_service_handler(
r,
handle_tenant_time_travel_remote_storage,
RequestName("v1_tenant_time_travel_remote_storage"),
)
})
.post("/v1/tenant/:tenant_id/secondary/download", |r| {
tenant_service_handler(
r,
handle_tenant_secondary_download,
RequestName("v1_tenant_secondary_download"),
)
})
// Timeline operations
.delete("/v1/tenant/:tenant_id/timeline/:timeline_id", |r| {
tenant_service_handler(
r,
handle_tenant_timeline_delete,
RequestName("v1_tenant_timeline"),
)
}) })
.post("/v1/tenant/:tenant_id/timeline", |r| { .post("/v1/tenant/:tenant_id/timeline", |r| {
tenant_service_handler( tenant_service_handler(r, handle_tenant_timeline_create)
r,
handle_tenant_timeline_create,
RequestName("v1_tenant_timeline"),
)
}) })
// Tenant detail GET passthrough to shard zero .get("/tenant/:tenant_id/locate", |r| {
.get("/v1/tenant/:tenant_id", |r| { tenant_service_handler(r, handle_tenant_locate)
tenant_service_handler(
r,
handle_tenant_timeline_passthrough,
RequestName("v1_tenant_passthrough"),
)
}) })
// Timeline GET passthrough to shard zero. Note that the `*` in the URL is a wildcard: any future .put("/tenant/:tenant_shard_id/migrate", |r| {
// timeline GET APIs will be implicitly included. tenant_service_handler(r, handle_tenant_shard_migrate)
.get("/v1/tenant/:tenant_id/timeline*", |r| {
tenant_service_handler(
r,
handle_tenant_timeline_passthrough,
RequestName("v1_tenant_timeline_passthrough"),
)
}) })
// Path aliases for tests_forward_compatibility
// TODO: remove these in future PR
.post("/re-attach", |r| request_span(r, handle_re_attach))
.post("/validate", |r| request_span(r, handle_validate))
} }

@@ -1,54 +0,0 @@
use std::{collections::HashMap, sync::Arc};
/// A map of locks covering some arbitrary identifiers. Useful if you have a collection of objects but don't
/// want to embed a lock in each one, or if your locking granularity is different to your object granularity.
/// For example, used in the storage controller where the objects are tenant shards, but sometimes locking
/// is needed at a tenant-wide granularity.
pub(crate) struct IdLockMap<T>
where
T: Eq + PartialEq + std::hash::Hash,
{
/// A synchronous lock for getting/setting the async locks that our callers will wait on.
entities: std::sync::Mutex<std::collections::HashMap<T, Arc<tokio::sync::RwLock<()>>>>,
}
impl<T> IdLockMap<T>
where
T: Eq + PartialEq + std::hash::Hash,
{
pub(crate) fn shared(
&self,
key: T,
) -> impl std::future::Future<Output = tokio::sync::OwnedRwLockReadGuard<()>> {
let mut locked = self.entities.lock().unwrap();
let entry = locked.entry(key).or_default();
entry.clone().read_owned()
}
pub(crate) fn exclusive(
&self,
key: T,
) -> impl std::future::Future<Output = tokio::sync::OwnedRwLockWriteGuard<()>> {
let mut locked = self.entities.lock().unwrap();
let entry = locked.entry(key).or_default();
entry.clone().write_owned()
}
/// Rather than building a lock guard that re-takes the [`Self::entities`] lock, we just do
/// periodic housekeeping to avoid the map growing indefinitely
pub(crate) fn housekeeping(&self) {
let mut locked = self.entities.lock().unwrap();
locked.retain(|_k, lock| lock.try_write().is_err())
}
}
impl<T> Default for IdLockMap<T>
where
T: Eq + PartialEq + std::hash::Hash,
{
fn default() -> Self {
Self {
entities: std::sync::Mutex::new(HashMap::new()),
}
}
}
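A small usage sketch for IdLockMap (hypothetical call sites, not part of this diff; TenantId is used as the key, matching the tenant-wide locking described in the doc comment above):

// Hypothetical: take a tenant-wide exclusive lock for a split, a shared lock for a
// per-shard operation, and occasionally prune released entries.
async fn lock_example(locks: &IdLockMap<TenantId>, tenant_id: TenantId) {
    {
        // Exclusive: e.g. a shard split that must not overlap with other tenant operations.
        let _write_guard = locks.exclusive(tenant_id).await;
        // ... tenant-wide mutation happens while the guard is held ...
    }
    // Shared: e.g. a reconcile for one shard of the tenant.
    let _read_guard = locks.shared(tenant_id).await;
    // Drop map entries whose locks are no longer held, so the map doesn't grow forever.
    locks.housekeeping();
}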

@@ -1,14 +1,9 @@
use serde::Serialize; use serde::{Deserialize, Serialize};
use utils::seqwait::MonotonicCounter; use utils::seqwait::MonotonicCounter;
mod auth;
mod compute_hook; mod compute_hook;
mod heartbeater;
pub mod http; pub mod http;
mod id_lock_map;
pub mod metrics;
mod node; mod node;
mod pageserver_client;
pub mod persistence; pub mod persistence;
mod reconciler; mod reconciler;
mod scheduler; mod scheduler;
@@ -16,7 +11,18 @@ mod schema;
pub mod service; pub mod service;
mod tenant_state; mod tenant_state;
#[derive(Ord, PartialOrd, Eq, PartialEq, Copy, Clone, Serialize)] #[derive(Clone, Serialize, Deserialize)]
enum PlacementPolicy {
/// Cheapest way to attach a tenant: just one pageserver, no secondary
Single,
/// Production-ready way to attach a tenant: one attached pageserver and
/// some number of secondaries.
Double(usize),
/// Do not attach to any pageservers
Detached,
}
#[derive(Ord, PartialOrd, Eq, PartialEq, Copy, Clone)]
struct Sequence(u64); struct Sequence(u64);
impl Sequence { impl Sequence {
@@ -31,12 +37,6 @@ impl std::fmt::Display for Sequence {
} }
} }
impl std::fmt::Debug for Sequence {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl MonotonicCounter<Sequence> for Sequence { impl MonotonicCounter<Sequence> for Sequence {
fn cnt_advance(&mut self, v: Sequence) { fn cnt_advance(&mut self, v: Sequence) {
assert!(*self <= v); assert!(*self <= v);
@@ -52,3 +52,9 @@ impl Sequence {
Sequence(self.0 + 1) Sequence(self.0 + 1)
} }
} }
impl Default for PlacementPolicy {
fn default() -> Self {
PlacementPolicy::Double(1)
}
}

@@ -1,15 +1,18 @@
use anyhow::{anyhow, Context}; /// The attachment service mimics the aspects of the control plane API
/// that are required for a pageserver to operate.
///
/// This enables running & testing pageservers without a full-blown
/// deployment of the Neon cloud platform.
///
use anyhow::anyhow;
use attachment_service::http::make_router; use attachment_service::http::make_router;
use attachment_service::metrics::preinitialize_metrics;
use attachment_service::persistence::Persistence; use attachment_service::persistence::Persistence;
use attachment_service::service::{Config, Service, MAX_UNAVAILABLE_INTERVAL_DEFAULT}; use attachment_service::service::{Config, Service};
use camino::Utf8PathBuf; use camino::Utf8PathBuf;
use clap::Parser; use clap::Parser;
use diesel::Connection;
use metrics::launch_timestamp::LaunchTimestamp; use metrics::launch_timestamp::LaunchTimestamp;
use std::sync::Arc; use std::sync::Arc;
use tokio::signal::unix::SignalKind; use tokio::signal::unix::SignalKind;
use tokio_util::sync::CancellationToken;
use utils::auth::{JwtAuth, SwappableJwtAuth}; use utils::auth::{JwtAuth, SwappableJwtAuth};
use utils::logging::{self, LogFormat}; use utils::logging::{self, LogFormat};
@@ -18,9 +21,6 @@ use utils::{project_build_tag, project_git_version, tcp_listener};
project_git_version!(GIT_VERSION); project_git_version!(GIT_VERSION);
project_build_tag!(BUILD_TAG); project_build_tag!(BUILD_TAG);
use diesel_migrations::{embed_migrations, EmbeddedMigrations};
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
#[derive(Parser)] #[derive(Parser)]
#[command(author, version, about, long_about = None)] #[command(author, version, about, long_about = None)]
#[command(arg_required_else_help(true))] #[command(arg_required_else_help(true))]
@@ -29,146 +29,25 @@ struct Cli {
#[arg(short, long)] #[arg(short, long)]
listen: std::net::SocketAddr, listen: std::net::SocketAddr,
/// Public key for JWT authentication of clients /// Path to public key for JWT authentication of clients
#[arg(long)] #[arg(long)]
public_key: Option<String>, public_key: Option<camino::Utf8PathBuf>,
/// Token for authenticating this service with the pageservers it controls /// Token for authenticating this service with the pageservers it controls
#[arg(long)] #[arg(short, long)]
jwt_token: Option<String>, jwt_token: Option<String>,
/// Token for authenticating this service with the control plane, when calling
/// the compute notification endpoint
#[arg(long)]
control_plane_jwt_token: Option<String>,
/// URL to control plane compute notification endpoint
#[arg(long)]
compute_hook_url: Option<String>,
/// Path to the .json file to store state (will be created if it doesn't exist) /// Path to the .json file to store state (will be created if it doesn't exist)
#[arg(short, long)] #[arg(short, long)]
path: Option<Utf8PathBuf>, path: Utf8PathBuf,
/// URL to connect to postgres, like postgresql://localhost:1234/attachment_service /// URL to connect to postgres, like postgresql://localhost:1234/attachment_service
#[arg(long)] #[arg(long)]
database_url: Option<String>,
/// Flag to enable dev mode, which permits running without auth
#[arg(long, default_value = "false")]
dev: bool,
/// Grace period before marking unresponsive pageserver offline
#[arg(long)]
max_unavailable_interval: Option<humantime::Duration>,
}
enum StrictMode {
/// In strict mode, we will require that all secrets are loaded, i.e. security features
/// may not be implicitly turned off by omitting secrets in the environment.
Strict,
/// In dev mode, secrets are optional, and omitting a particular secret will implicitly
/// disable the auth related to it (e.g. no pageserver jwt key -> send unauthenticated
/// requests, no public key -> don't authenticate incoming requests).
Dev,
}
impl Default for StrictMode {
fn default() -> Self {
Self::Strict
}
}
/// Secrets may either be provided on the command line (for testing), or loaded from AWS Secrets Manager: this
/// type encapsulates the logic to decide which and do the loading.
struct Secrets {
database_url: String, database_url: String,
public_key: Option<JwtAuth>,
jwt_token: Option<String>,
control_plane_jwt_token: Option<String>,
} }
impl Secrets { #[tokio::main]
const DATABASE_URL_ENV: &'static str = "DATABASE_URL"; async fn main() -> anyhow::Result<()> {
const PAGESERVER_JWT_TOKEN_ENV: &'static str = "PAGESERVER_JWT_TOKEN";
const CONTROL_PLANE_JWT_TOKEN_ENV: &'static str = "CONTROL_PLANE_JWT_TOKEN";
const PUBLIC_KEY_ENV: &'static str = "PUBLIC_KEY";
/// Load secrets from, in order of preference:
/// - CLI args if database URL is provided on the CLI
/// - Environment variables if DATABASE_URL is set.
/// - AWS Secrets Manager secrets
async fn load(args: &Cli) -> anyhow::Result<Self> {
let Some(database_url) =
Self::load_secret(&args.database_url, Self::DATABASE_URL_ENV).await
else {
anyhow::bail!(
"Database URL is not set (set `--database-url`, or `DATABASE_URL` environment)"
)
};
let public_key = match Self::load_secret(&args.public_key, Self::PUBLIC_KEY_ENV).await {
Some(v) => Some(JwtAuth::from_key(v).context("Loading public key")?),
None => None,
};
let this = Self {
database_url,
public_key,
jwt_token: Self::load_secret(&args.jwt_token, Self::PAGESERVER_JWT_TOKEN_ENV).await,
control_plane_jwt_token: Self::load_secret(
&args.control_plane_jwt_token,
Self::CONTROL_PLANE_JWT_TOKEN_ENV,
)
.await,
};
Ok(this)
}
async fn load_secret(cli: &Option<String>, env_name: &str) -> Option<String> {
if let Some(v) = cli {
Some(v.clone())
} else if let Ok(v) = std::env::var(env_name) {
Some(v)
} else {
None
}
}
}
/// Execute the diesel migrations that are built into this binary
async fn migration_run(database_url: &str) -> anyhow::Result<()> {
use diesel::PgConnection;
use diesel_migrations::{HarnessWithOutput, MigrationHarness};
let mut conn = PgConnection::establish(database_url)?;
HarnessWithOutput::write_to_stdout(&mut conn)
.run_pending_migrations(MIGRATIONS)
.map(|_| ())
.map_err(|e| anyhow::anyhow!(e))?;
Ok(())
}
fn main() -> anyhow::Result<()> {
let default_panic = std::panic::take_hook();
std::panic::set_hook(Box::new(move |info| {
default_panic(info);
std::process::exit(1);
}));
tokio::runtime::Builder::new_current_thread()
// We use spawn_blocking for database operations, so require approximately
// as many blocking threads as we will open database connections.
.max_blocking_threads(Persistence::MAX_CONNECTIONS as usize)
.enable_all()
.build()
.unwrap()
.block_on(async_main())
}
async fn async_main() -> anyhow::Result<()> {
let launch_ts = Box::leak(Box::new(LaunchTimestamp::generate())); let launch_ts = Box::leak(Box::new(LaunchTimestamp::generate()));
logging::init( logging::init(
@@ -177,96 +56,46 @@ async fn async_main() -> anyhow::Result<()> {
logging::Output::Stdout, logging::Output::Stdout,
)?; )?;
preinitialize_metrics();
let args = Cli::parse(); let args = Cli::parse();
tracing::info!( tracing::info!(
"version: {}, launch_timestamp: {}, build_tag {}, state at {}, listening on {}", "version: {}, launch_timestamp: {}, build_tag {}, state at {}, listening on {}",
GIT_VERSION, GIT_VERSION,
launch_ts.to_string(), launch_ts.to_string(),
BUILD_TAG, BUILD_TAG,
args.path.as_ref().unwrap_or(&Utf8PathBuf::from("<none>")), args.path,
args.listen args.listen
); );
let strict_mode = if args.dev {
StrictMode::Dev
} else {
StrictMode::Strict
};
let secrets = Secrets::load(&args).await?;
// Validate required secrets and arguments are provided in strict mode
match strict_mode {
StrictMode::Strict
if (secrets.public_key.is_none()
|| secrets.jwt_token.is_none()
|| secrets.control_plane_jwt_token.is_none()) =>
{
// Production systems should always have secrets configured: if public_key was not set
// then we would implicitly disable auth.
anyhow::bail!(
"Insecure config! One or more secrets is not set. This is only permitted in `--dev` mode"
);
}
StrictMode::Strict if args.compute_hook_url.is_none() => {
// Production systems should always have a compute hook set, to prevent falling
// back to trying to use neon_local.
anyhow::bail!(
"`--compute-hook-url` is not set: this is only permitted in `--dev` mode"
);
}
StrictMode::Strict => {
tracing::info!("Starting in strict mode: configuration is OK.")
}
StrictMode::Dev => {
tracing::warn!("Starting in dev mode: this may be an insecure configuration.")
}
}
let config = Config { let config = Config {
jwt_token: secrets.jwt_token, jwt_token: args.jwt_token,
control_plane_jwt_token: secrets.control_plane_jwt_token,
compute_hook_url: args.compute_hook_url,
max_unavailable_interval: args
.max_unavailable_interval
.map(humantime::Duration::into)
.unwrap_or(MAX_UNAVAILABLE_INTERVAL_DEFAULT),
}; };
// After loading secrets & config, but before starting anything else, apply database migrations let json_path = if args.path.as_os_str().is_empty() {
migration_run(&secrets.database_url) None
.await } else {
.context("Running database migrations")?; Some(args.path)
};
let json_path = args.path; let persistence = Arc::new(Persistence::new(args.database_url, json_path.clone()));
let persistence = Arc::new(Persistence::new(secrets.database_url, json_path.clone()));
let service = Service::spawn(config, persistence.clone()).await?; let service = Service::spawn(config, persistence.clone()).await?;
let http_listener = tcp_listener::bind(args.listen)?; let http_listener = tcp_listener::bind(args.listen)?;
let auth = secrets let auth = if let Some(public_key_path) = &args.public_key {
.public_key let jwt_auth = JwtAuth::from_key_path(public_key_path)?;
.map(|jwt_auth| Arc::new(SwappableJwtAuth::new(jwt_auth))); Some(Arc::new(SwappableJwtAuth::new(jwt_auth)))
let router = make_router(service.clone(), auth) } else {
None
};
let router = make_router(service, auth)
.build() .build()
.map_err(|err| anyhow!(err))?; .map_err(|err| anyhow!(err))?;
let router_service = utils::http::RouterService::new(router).unwrap(); let router_service = utils::http::RouterService::new(router).unwrap();
let server = hyper::Server::from_tcp(http_listener)?.serve(router_service);
// Start HTTP server
let server_shutdown = CancellationToken::new();
let server = hyper::Server::from_tcp(http_listener)?
.serve(router_service)
.with_graceful_shutdown({
let server_shutdown = server_shutdown.clone();
async move {
server_shutdown.cancelled().await;
}
});
tracing::info!("Serving on {0}", args.listen); tracing::info!("Serving on {0}", args.listen);
let server_task = tokio::task::spawn(server);
tokio::task::spawn(server);
// Wait until we receive a signal // Wait until we receive a signal
let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt())?; let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt())?;
@@ -287,16 +116,5 @@ async fn async_main() -> anyhow::Result<()> {
} }
} }
// Stop HTTP server first, so that we don't have to service requests
// while shutting down Service
server_shutdown.cancel();
if let Err(e) = server_task.await {
tracing::error!("Error joining HTTP server task: {e}")
}
tracing::info!("Joined HTTP server task");
service.shutdown().await;
tracing::info!("Service shutdown complete");
std::process::exit(0); std::process::exit(0);
} }

@@ -1,284 +0,0 @@
//!
//! This module provides metric definitions for the storage controller.
//!
//! All metrics are grouped in [`StorageControllerMetricGroup`]. [`StorageControllerMetrics`] holds
//! the mentioned metrics and their encoder. It's globally available via the [`METRICS_REGISTRY`]
//! constant.
//!
//! The rest of the code defines label group types and deals with converting outer types to labels.
//!
use bytes::Bytes;
use measured::{
label::{LabelValue, StaticLabelSet},
FixedCardinalityLabel, MetricGroup,
};
use once_cell::sync::Lazy;
use std::sync::Mutex;
use crate::persistence::{DatabaseError, DatabaseOperation};
pub(crate) static METRICS_REGISTRY: Lazy<StorageControllerMetrics> =
Lazy::new(StorageControllerMetrics::default);
pub fn preinitialize_metrics() {
Lazy::force(&METRICS_REGISTRY);
}
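// Illustrative sketch, not part of this change: the registry above is intended to be
// bumped from request-handling code and rendered for a /metrics endpoint. The handler
// below is hypothetical; the counter, label-group, `inc` and `encode` names follow the
// definitions and usages elsewhere in this module.
fn example_record_request(path: &str, status: hyper::http::StatusCode) -> Bytes {
    METRICS_REGISTRY
        .metrics_group
        .storage_controller_http_request_status
        .inc(HttpRequestStatusLabelGroup {
            path,
            method: Method::Get,
            status: StatusCode(status),
        });
    // Render the whole registry in Prometheus text exposition format.
    METRICS_REGISTRY.encode()
}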
pub(crate) struct StorageControllerMetrics {
pub(crate) metrics_group: StorageControllerMetricGroup,
encoder: Mutex<measured::text::TextEncoder>,
}
#[derive(measured::MetricGroup)]
pub(crate) struct StorageControllerMetricGroup {
/// Count of how many times we spawn a reconcile task
pub(crate) storage_controller_reconcile_spawn: measured::Counter,
/// Reconciler tasks completed, broken down by success/failure/cancelled
pub(crate) storage_controller_reconcile_complete:
measured::CounterVec<ReconcileCompleteLabelGroupSet>,
/// HTTP request status counters for handled requests
pub(crate) storage_controller_http_request_status:
measured::CounterVec<HttpRequestStatusLabelGroupSet>,
/// HTTP request handler latency across all status codes
pub(crate) storage_controller_http_request_latency:
measured::HistogramVec<HttpRequestLatencyLabelGroupSet, 5>,
/// Count of HTTP requests to the pageserver that resulted in an error,
/// broken down by the pageserver node id, request name and method
pub(crate) storage_controller_pageserver_request_error:
measured::CounterVec<PageserverRequestLabelGroupSet>,
/// Latency of HTTP requests to the pageserver, broken down by pageserver
/// node id, request name and method. This includes both successful and unsuccessful
/// requests.
pub(crate) storage_controller_pageserver_request_latency:
measured::HistogramVec<PageserverRequestLabelGroupSet, 5>,
/// Count of pass-through HTTP requests to the pageserver that resulted in an error,
/// broken down by the pageserver node id, request name and method
pub(crate) storage_controller_passthrough_request_error:
measured::CounterVec<PageserverRequestLabelGroupSet>,
/// Latency of pass-through HTTP requests to the pageserver, broken down by pageserver
/// node id, request name and method. This includes both successful and unsuccessful
/// requests.
pub(crate) storage_controller_passthrough_request_latency:
measured::HistogramVec<PageserverRequestLabelGroupSet, 5>,
/// Count of errors in database queries, broken down by error type and operation.
pub(crate) storage_controller_database_query_error:
measured::CounterVec<DatabaseQueryErrorLabelGroupSet>,
/// Latency of database queries, broken down by operation.
pub(crate) storage_controller_database_query_latency:
measured::HistogramVec<DatabaseQueryLatencyLabelGroupSet, 5>,
}
impl StorageControllerMetrics {
pub(crate) fn encode(&self) -> Bytes {
let mut encoder = self.encoder.lock().unwrap();
self.metrics_group.collect_into(&mut *encoder);
encoder.finish()
}
}
impl Default for StorageControllerMetrics {
fn default() -> Self {
Self {
metrics_group: StorageControllerMetricGroup::new(),
encoder: Mutex::new(measured::text::TextEncoder::new()),
}
}
}
impl StorageControllerMetricGroup {
pub(crate) fn new() -> Self {
Self {
storage_controller_reconcile_spawn: measured::Counter::new(),
storage_controller_reconcile_complete: measured::CounterVec::new(
ReconcileCompleteLabelGroupSet {
status: StaticLabelSet::new(),
},
),
storage_controller_http_request_status: measured::CounterVec::new(
HttpRequestStatusLabelGroupSet {
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
status: StaticLabelSet::new(),
},
),
storage_controller_http_request_latency: measured::HistogramVec::new(
measured::metric::histogram::Thresholds::exponential_buckets(0.1, 2.0),
),
storage_controller_pageserver_request_error: measured::CounterVec::new(
PageserverRequestLabelGroupSet {
pageserver_id: lasso::ThreadedRodeo::new(),
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
},
),
storage_controller_pageserver_request_latency: measured::HistogramVec::new(
measured::metric::histogram::Thresholds::exponential_buckets(0.1, 2.0),
),
storage_controller_passthrough_request_error: measured::CounterVec::new(
PageserverRequestLabelGroupSet {
pageserver_id: lasso::ThreadedRodeo::new(),
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
},
),
storage_controller_passthrough_request_latency: measured::HistogramVec::new(
measured::metric::histogram::Thresholds::exponential_buckets(0.1, 2.0),
),
storage_controller_database_query_error: measured::CounterVec::new(
DatabaseQueryErrorLabelGroupSet {
operation: StaticLabelSet::new(),
error_type: StaticLabelSet::new(),
},
),
storage_controller_database_query_latency: measured::HistogramVec::new(
measured::metric::histogram::Thresholds::exponential_buckets(0.1, 2.0),
),
}
}
}
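// Note, not part of this change: assuming `exponential_buckets(start, factor)` scales each
// successive threshold by the factor, the 5-bucket histograms above observe latencies
// against thresholds of roughly 0.1, 0.2, 0.4, 0.8 and 1.6 seconds.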
#[derive(measured::LabelGroup)]
#[label(set = ReconcileCompleteLabelGroupSet)]
pub(crate) struct ReconcileCompleteLabelGroup {
pub(crate) status: ReconcileOutcome,
}
#[derive(measured::LabelGroup)]
#[label(set = HttpRequestStatusLabelGroupSet)]
pub(crate) struct HttpRequestStatusLabelGroup<'a> {
#[label(dynamic_with = lasso::ThreadedRodeo)]
pub(crate) path: &'a str,
pub(crate) method: Method,
pub(crate) status: StatusCode,
}
#[derive(measured::LabelGroup)]
#[label(set = HttpRequestLatencyLabelGroupSet)]
pub(crate) struct HttpRequestLatencyLabelGroup<'a> {
#[label(dynamic_with = lasso::ThreadedRodeo)]
pub(crate) path: &'a str,
pub(crate) method: Method,
}
impl Default for HttpRequestLatencyLabelGroupSet {
fn default() -> Self {
Self {
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
}
}
}
#[derive(measured::LabelGroup, Clone)]
#[label(set = PageserverRequestLabelGroupSet)]
pub(crate) struct PageserverRequestLabelGroup<'a> {
#[label(dynamic_with = lasso::ThreadedRodeo)]
pub(crate) pageserver_id: &'a str,
#[label(dynamic_with = lasso::ThreadedRodeo)]
pub(crate) path: &'a str,
pub(crate) method: Method,
}
impl Default for PageserverRequestLabelGroupSet {
fn default() -> Self {
Self {
pageserver_id: lasso::ThreadedRodeo::new(),
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
}
}
}
#[derive(measured::LabelGroup)]
#[label(set = DatabaseQueryErrorLabelGroupSet)]
pub(crate) struct DatabaseQueryErrorLabelGroup {
pub(crate) error_type: DatabaseErrorLabel,
pub(crate) operation: DatabaseOperation,
}
#[derive(measured::LabelGroup)]
#[label(set = DatabaseQueryLatencyLabelGroupSet)]
pub(crate) struct DatabaseQueryLatencyLabelGroup {
pub(crate) operation: DatabaseOperation,
}
#[derive(FixedCardinalityLabel)]
pub(crate) enum ReconcileOutcome {
#[label(rename = "ok")]
Success,
Error,
Cancel,
}
#[derive(FixedCardinalityLabel, Clone)]
pub(crate) enum Method {
Get,
Put,
Post,
Delete,
Other,
}
impl From<hyper::Method> for Method {
fn from(value: hyper::Method) -> Self {
if value == hyper::Method::GET {
Method::Get
} else if value == hyper::Method::PUT {
Method::Put
} else if value == hyper::Method::POST {
Method::Post
} else if value == hyper::Method::DELETE {
Method::Delete
} else {
Method::Other
}
}
}
pub(crate) struct StatusCode(pub(crate) hyper::http::StatusCode);
impl LabelValue for StatusCode {
fn visit<V: measured::label::LabelVisitor>(&self, v: V) -> V::Output {
v.write_int(self.0.as_u16() as u64)
}
}
impl FixedCardinalityLabel for StatusCode {
fn cardinality() -> usize {
(100..1000).len()
}
fn encode(&self) -> usize {
self.0.as_u16() as usize
}
fn decode(value: usize) -> Self {
Self(hyper::http::StatusCode::from_u16(u16::try_from(value).unwrap()).unwrap())
}
}
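// Illustrative sketch, not part of this change: with the implementation above, the label
// index is simply the numeric status code.
fn example_status_code_label() {
    let label = StatusCode(hyper::http::StatusCode::NOT_FOUND);
    assert_eq!(label.encode(), 404);
    assert_eq!(StatusCode::decode(404).0.as_u16(), 404);
}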
#[derive(FixedCardinalityLabel)]
pub(crate) enum DatabaseErrorLabel {
Query,
Connection,
ConnectionPool,
Logical,
}
impl DatabaseError {
pub(crate) fn error_label(&self) -> DatabaseErrorLabel {
match self {
Self::Query(_) => DatabaseErrorLabel::Query,
Self::Connection(_) => DatabaseErrorLabel::Connection,
Self::ConnectionPool(_) => DatabaseErrorLabel::ConnectionPool,
Self::Logical(_) => DatabaseErrorLabel::Logical,
}
}
}

View File

@@ -1,52 +1,20 @@
use std::{str::FromStr, time::Duration}; use control_plane::attachment_service::{NodeAvailability, NodeSchedulingPolicy};
use utils::id::NodeId;
use hyper::StatusCode; use crate::persistence::NodePersistence;
use pageserver_api::{
controller_api::{
NodeAvailability, NodeRegisterRequest, NodeSchedulingPolicy, TenantLocateResponseShard,
},
shard::TenantShardId,
};
use pageserver_client::mgmt_api;
use serde::Serialize;
use tokio_util::sync::CancellationToken;
use utils::{backoff, id::NodeId};
use crate::{ #[derive(Clone)]
pageserver_client::PageserverClient, persistence::NodePersistence, scheduler::MaySchedule,
};
/// Represents the in-memory description of a Node.
///
/// Scheduling statistics are maintained separately in [`crate::scheduler`].
///
/// The persistent subset of the Node is defined in [`crate::persistence::NodePersistence`]: the
/// implementation of serialization on this type is only for debug dumps.
#[derive(Clone, Serialize)]
pub(crate) struct Node { pub(crate) struct Node {
id: NodeId, pub(crate) id: NodeId,
availability: NodeAvailability, pub(crate) availability: NodeAvailability,
scheduling: NodeSchedulingPolicy, pub(crate) scheduling: NodeSchedulingPolicy,
listen_http_addr: String, pub(crate) listen_http_addr: String,
listen_http_port: u16, pub(crate) listen_http_port: u16,
listen_pg_addr: String, pub(crate) listen_pg_addr: String,
listen_pg_port: u16, pub(crate) listen_pg_port: u16,
// This cancellation token means "stop any RPCs in flight to this node, and don't start
// any more". It is not related to process shutdown.
#[serde(skip)]
cancel: CancellationToken,
}
/// When updating [`Node::availability`] we use this type to indicate to the caller
/// whether/how they changed it.
pub(crate) enum AvailabilityTransition {
ToActive,
ToOffline,
Unchanged,
} }
impl Node { impl Node {
@@ -54,111 +22,18 @@ impl Node {
format!("http://{}:{}", self.listen_http_addr, self.listen_http_port) format!("http://{}:{}", self.listen_http_addr, self.listen_http_port)
} }
pub(crate) fn get_id(&self) -> NodeId {
self.id
}
pub(crate) fn set_scheduling(&mut self, scheduling: NodeSchedulingPolicy) {
self.scheduling = scheduling
}
/// Does this registration request match `self`? This is used when deciding whether a registration
/// request should be allowed to update an existing record with the same node ID.
pub(crate) fn registration_match(&self, register_req: &NodeRegisterRequest) -> bool {
self.id == register_req.node_id
&& self.listen_http_addr == register_req.listen_http_addr
&& self.listen_http_port == register_req.listen_http_port
&& self.listen_pg_addr == register_req.listen_pg_addr
&& self.listen_pg_port == register_req.listen_pg_port
}
/// For a shard located on this node, populate a response object
/// with this node's address information.
pub(crate) fn shard_location(&self, shard_id: TenantShardId) -> TenantLocateResponseShard {
TenantLocateResponseShard {
shard_id,
node_id: self.id,
listen_http_addr: self.listen_http_addr.clone(),
listen_http_port: self.listen_http_port,
listen_pg_addr: self.listen_pg_addr.clone(),
listen_pg_port: self.listen_pg_port,
}
}
pub(crate) fn set_availability(&mut self, availability: NodeAvailability) {
match self.get_availability_transition(availability) {
AvailabilityTransition::ToActive => {
// Give the node a new cancellation token, effectively resetting it to un-cancelled. Any
// users of previously-cloned copies of the node will still see the old cancellation
// state. For example, Reconcilers in flight will have to complete and be spawned
// again to realize that the node has become available.
self.cancel = CancellationToken::new();
}
AvailabilityTransition::ToOffline => {
// Fire the node's cancellation token to cancel any in-flight API requests to it
self.cancel.cancel();
}
AvailabilityTransition::Unchanged => {}
}
self.availability = availability;
}
/// Without modifying the availability of the node, convert the intended availability
/// into a description of the transition.
pub(crate) fn get_availability_transition(
&self,
availability: NodeAvailability,
) -> AvailabilityTransition {
use AvailabilityTransition::*;
use NodeAvailability::*;
match (self.availability, availability) {
(Offline, Active(_)) => ToActive,
(Active(_), Offline) => ToOffline,
_ => Unchanged,
}
}
/// Whether we may send API requests to this node.
pub(crate) fn is_available(&self) -> bool {
// When we clone a node, [`Self::availability`] is a snapshot, but [`Self::cancel`] holds
// a reference to the original Node's cancellation status. Checking both of these results
// in a "pessimistic" check where we will consider a Node instance unavailable if it was unavailable
// when we cloned it, or if the original Node instance's cancellation token was fired.
matches!(self.availability, NodeAvailability::Active(_)) && !self.cancel.is_cancelled()
}
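    // Illustrative sketch (comments only, not part of this change): a caller processing a
    // heartbeat result might combine the two methods above, assuming `NodeAvailability` is
    // `Copy` as the match in `get_availability_transition` implies:
    //
    //     match node.get_availability_transition(observed) {
    //         AvailabilityTransition::ToOffline => { /* e.g. move work away from the node */ }
    //         AvailabilityTransition::ToActive => { /* e.g. the node may take work again */ }
    //         AvailabilityTransition::Unchanged => {}
    //     }
    //     node.set_availability(observed);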
/// Is this node eligible to have work scheduled onto it? /// Is this node eligible to have work scheduled onto it?
pub(crate) fn may_schedule(&self) -> MaySchedule { pub(crate) fn may_schedule(&self) -> bool {
let score = match self.availability { match self.availability {
NodeAvailability::Active(score) => score, NodeAvailability::Active => {}
NodeAvailability::Offline => return MaySchedule::No, NodeAvailability::Offline => return false,
}; }
match self.scheduling { match self.scheduling {
NodeSchedulingPolicy::Active => MaySchedule::Yes(score), NodeSchedulingPolicy::Active => true,
NodeSchedulingPolicy::Draining => MaySchedule::No, NodeSchedulingPolicy::Draining => false,
NodeSchedulingPolicy::Filling => MaySchedule::Yes(score), NodeSchedulingPolicy::Filling => true,
NodeSchedulingPolicy::Pause => MaySchedule::No, NodeSchedulingPolicy::Pause => false,
}
}
pub(crate) fn new(
id: NodeId,
listen_http_addr: String,
listen_http_port: u16,
listen_pg_addr: String,
listen_pg_port: u16,
) -> Self {
Self {
id,
listen_http_addr,
listen_http_port,
listen_pg_addr,
listen_pg_port,
scheduling: NodeSchedulingPolicy::Filling,
availability: NodeAvailability::Offline,
cancel: CancellationToken::new(),
} }
} }
@@ -172,100 +47,4 @@ impl Node {
listen_pg_port: self.listen_pg_port as i32, listen_pg_port: self.listen_pg_port as i32,
} }
} }
pub(crate) fn from_persistent(np: NodePersistence) -> Self {
Self {
id: NodeId(np.node_id as u64),
// At startup we consider a node offline until proven otherwise.
availability: NodeAvailability::Offline,
scheduling: NodeSchedulingPolicy::from_str(&np.scheduling_policy)
.expect("Bad scheduling policy in DB"),
listen_http_addr: np.listen_http_addr,
listen_http_port: np.listen_http_port as u16,
listen_pg_addr: np.listen_pg_addr,
listen_pg_port: np.listen_pg_port as u16,
cancel: CancellationToken::new(),
}
}
/// Wrapper for issuing requests to pageserver management API: takes care of generic
/// retry/backoff for retryable HTTP status codes.
///
/// This will return None to indicate cancellation. Cancellation may happen from
/// the cancellation token passed in, or from Self's cancellation token (i.e. node
/// going offline).
pub(crate) async fn with_client_retries<T, O, F>(
&self,
mut op: O,
jwt: &Option<String>,
warn_threshold: u32,
max_retries: u32,
timeout: Duration,
cancel: &CancellationToken,
) -> Option<mgmt_api::Result<T>>
where
O: FnMut(PageserverClient) -> F,
F: std::future::Future<Output = mgmt_api::Result<T>>,
{
fn is_fatal(e: &mgmt_api::Error) -> bool {
use mgmt_api::Error::*;
match e {
ReceiveBody(_) | ReceiveErrorBody(_) => false,
ApiError(StatusCode::SERVICE_UNAVAILABLE, _)
| ApiError(StatusCode::GATEWAY_TIMEOUT, _)
| ApiError(StatusCode::REQUEST_TIMEOUT, _) => false,
ApiError(_, _) => true,
Cancelled => true,
}
}
backoff::retry(
|| {
let http_client = reqwest::ClientBuilder::new()
.timeout(timeout)
.build()
.expect("Failed to construct HTTP client");
let client = PageserverClient::from_client(
self.get_id(),
http_client,
self.base_url(),
jwt.as_deref(),
);
let node_cancel_fut = self.cancel.cancelled();
let op_fut = op(client);
async {
tokio::select! {
r = op_fut=> {r},
_ = node_cancel_fut => {
Err(mgmt_api::Error::Cancelled)
}}
}
},
is_fatal,
warn_threshold,
max_retries,
&format!(
"Call to node {} ({}:{}) management API",
self.id, self.listen_http_addr, self.listen_http_port
),
cancel,
)
.await
}
}
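// Illustrative call site, not part of this change: issuing a retried management-API request
// against a node. The retry counts, timeout and absent JWT are placeholder values;
// `get_utilization` is one of the wrapped `PageserverClient` methods.
async fn example_get_utilization(
    node: &Node,
    cancel: &CancellationToken,
) -> Option<mgmt_api::Result<pageserver_api::models::PageserverUtilization>> {
    node.with_client_retries(
        |client| async move { client.get_utilization().await },
        &None,                  // no JWT in this sketch
        3,                      // warn_threshold
        10,                     // max_retries
        Duration::from_secs(5), // per-request timeout
        cancel,
    )
    .await
}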
impl std::fmt::Display for Node {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{} ({})", self.id, self.listen_http_addr)
}
}
impl std::fmt::Debug for Node {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{} ({})", self.id, self.listen_http_addr)
}
} }

View File

@@ -1,203 +0,0 @@
use pageserver_api::{
models::{
LocationConfig, LocationConfigListResponse, PageserverUtilization, SecondaryProgress,
TenantShardSplitRequest, TenantShardSplitResponse, TimelineCreateRequest, TimelineInfo,
},
shard::TenantShardId,
};
use pageserver_client::mgmt_api::{Client, Result};
use reqwest::StatusCode;
use utils::id::{NodeId, TimelineId};
/// Thin wrapper around [`pageserver_client::mgmt_api::Client`]. It allows the storage
/// controller to collect metrics in a non-intrusive manner.
#[derive(Debug, Clone)]
pub(crate) struct PageserverClient {
inner: Client,
node_id_label: String,
}
macro_rules! measured_request {
($name:literal, $method:expr, $node_id: expr, $invoke:expr) => {{
let labels = crate::metrics::PageserverRequestLabelGroup {
pageserver_id: $node_id,
path: $name,
method: $method,
};
let latency = &crate::metrics::METRICS_REGISTRY
.metrics_group
.storage_controller_pageserver_request_latency;
let _timer_guard = latency.start_timer(labels.clone());
let res = $invoke;
if res.is_err() {
let error_counters = &crate::metrics::METRICS_REGISTRY
.metrics_group
.storage_controller_pageserver_request_error;
error_counters.inc(labels)
}
res
}};
}
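// Illustrative call site, not part of this change: constructing the wrapper and issuing one
// of the measured calls defined below. The node id and endpoint URL are placeholders.
async fn example_timeline_list(tenant_shard_id: TenantShardId) -> Result<Vec<TimelineInfo>> {
    let client = PageserverClient::new(
        NodeId(1),
        "http://pageserver-0.local:9898".to_string(),
        None,
    );
    client.timeline_list(&tenant_shard_id).await
}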
impl PageserverClient {
pub(crate) fn new(node_id: NodeId, mgmt_api_endpoint: String, jwt: Option<&str>) -> Self {
Self {
inner: Client::from_client(reqwest::Client::new(), mgmt_api_endpoint, jwt),
node_id_label: node_id.0.to_string(),
}
}
pub(crate) fn from_client(
node_id: NodeId,
raw_client: reqwest::Client,
mgmt_api_endpoint: String,
jwt: Option<&str>,
) -> Self {
Self {
inner: Client::from_client(raw_client, mgmt_api_endpoint, jwt),
node_id_label: node_id.0.to_string(),
}
}
pub(crate) async fn tenant_delete(&self, tenant_shard_id: TenantShardId) -> Result<StatusCode> {
measured_request!(
"tenant",
crate::metrics::Method::Delete,
&self.node_id_label,
self.inner.tenant_delete(tenant_shard_id).await
)
}
pub(crate) async fn tenant_time_travel_remote_storage(
&self,
tenant_shard_id: TenantShardId,
timestamp: &str,
done_if_after: &str,
) -> Result<()> {
measured_request!(
"tenant_time_travel_remote_storage",
crate::metrics::Method::Put,
&self.node_id_label,
self.inner
.tenant_time_travel_remote_storage(tenant_shard_id, timestamp, done_if_after)
.await
)
}
pub(crate) async fn tenant_secondary_download(
&self,
tenant_id: TenantShardId,
wait: Option<std::time::Duration>,
) -> Result<(StatusCode, SecondaryProgress)> {
measured_request!(
"tenant_secondary_download",
crate::metrics::Method::Post,
&self.node_id_label,
self.inner.tenant_secondary_download(tenant_id, wait).await
)
}
pub(crate) async fn location_config(
&self,
tenant_shard_id: TenantShardId,
config: LocationConfig,
flush_ms: Option<std::time::Duration>,
lazy: bool,
) -> Result<()> {
measured_request!(
"location_config",
crate::metrics::Method::Put,
&self.node_id_label,
self.inner
.location_config(tenant_shard_id, config, flush_ms, lazy)
.await
)
}
pub(crate) async fn list_location_config(&self) -> Result<LocationConfigListResponse> {
measured_request!(
"location_configs",
crate::metrics::Method::Get,
&self.node_id_label,
self.inner.list_location_config().await
)
}
pub(crate) async fn get_location_config(
&self,
tenant_shard_id: TenantShardId,
) -> Result<Option<LocationConfig>> {
measured_request!(
"location_config",
crate::metrics::Method::Get,
&self.node_id_label,
self.inner.get_location_config(tenant_shard_id).await
)
}
pub(crate) async fn timeline_create(
&self,
tenant_shard_id: TenantShardId,
req: &TimelineCreateRequest,
) -> Result<TimelineInfo> {
measured_request!(
"timeline",
crate::metrics::Method::Post,
&self.node_id_label,
self.inner.timeline_create(tenant_shard_id, req).await
)
}
pub(crate) async fn timeline_delete(
&self,
tenant_shard_id: TenantShardId,
timeline_id: TimelineId,
) -> Result<StatusCode> {
measured_request!(
"timeline",
crate::metrics::Method::Delete,
&self.node_id_label,
self.inner
.timeline_delete(tenant_shard_id, timeline_id)
.await
)
}
pub(crate) async fn tenant_shard_split(
&self,
tenant_shard_id: TenantShardId,
req: TenantShardSplitRequest,
) -> Result<TenantShardSplitResponse> {
measured_request!(
"tenant_shard_split",
crate::metrics::Method::Put,
&self.node_id_label,
self.inner.tenant_shard_split(tenant_shard_id, req).await
)
}
pub(crate) async fn timeline_list(
&self,
tenant_shard_id: &TenantShardId,
) -> Result<Vec<TimelineInfo>> {
measured_request!(
"timelines",
crate::metrics::Method::Get,
&self.node_id_label,
self.inner.timeline_list(tenant_shard_id).await
)
}
pub(crate) async fn get_utilization(&self) -> Result<PageserverUtilization> {
measured_request!(
"utilization",
crate::metrics::Method::Get,
&self.node_id_label,
self.inner.get_utilization().await
)
}
}

View File

@@ -1,32 +1,25 @@
pub(crate) mod split_state;
use std::collections::HashMap; use std::collections::HashMap;
use std::str::FromStr; use std::str::FromStr;
use std::time::Duration;
use self::split_state::SplitState;
use camino::Utf8Path; use camino::Utf8Path;
use camino::Utf8PathBuf; use camino::Utf8PathBuf;
use control_plane::attachment_service::{NodeAvailability, NodeSchedulingPolicy};
use diesel::pg::PgConnection; use diesel::pg::PgConnection;
use diesel::prelude::*; use diesel::prelude::*;
use diesel::Connection; use diesel::Connection;
use pageserver_api::controller_api::{NodeSchedulingPolicy, PlacementPolicy};
use pageserver_api::models::TenantConfig; use pageserver_api::models::TenantConfig;
use pageserver_api::shard::ShardConfigError;
use pageserver_api::shard::ShardIdentity;
use pageserver_api::shard::ShardStripeSize;
use pageserver_api::shard::{ShardCount, ShardNumber, TenantShardId}; use pageserver_api::shard::{ShardCount, ShardNumber, TenantShardId};
use postgres_connection::parse_host_port;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use utils::generation::Generation; use utils::generation::Generation;
use utils::id::{NodeId, TenantId}; use utils::id::{NodeId, TenantId};
use crate::metrics::{
DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
};
use crate::node::Node; use crate::node::Node;
use crate::PlacementPolicy;
/// ## What do we store? /// ## What do we store?
/// ///
/// The storage controller service does not store most of its state durably. /// The attachment service does not store most of its state durably.
/// ///
/// The essential things to store durably are: /// The essential things to store durably are:
/// - generation numbers, as these must always advance monotonically to ensure data safety. /// - generation numbers, as these must always advance monotonically to ensure data safety.
@@ -40,7 +33,7 @@ use crate::node::Node;
/// ///
/// ## Performance/efficiency /// ## Performance/efficiency
/// ///
/// The storage controller service does not go via the database for most things: there are /// The attachment service does not go via the database for most things: there are
/// a couple of places where we must, and where efficiency matters: /// a couple of places where we must, and where efficiency matters:
/// - Incrementing generation numbers: the Reconciler has to wait for this to complete /// - Incrementing generation numbers: the Reconciler has to wait for this to complete
/// before it can attach a tenant, so this acts as a bound on how fast things like /// before it can attach a tenant, so this acts as a bound on how fast things like
@@ -52,7 +45,7 @@ use crate::node::Node;
/// updated, and reads of nodes are always from memory, not the database. We only require that /// updated, and reads of nodes are always from memory, not the database. We only require that
/// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline. /// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
pub struct Persistence { pub struct Persistence {
connection_pool: diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<PgConnection>>, database_url: String,
// In test environments, we support loading+saving a JSON file. This is temporary, for the benefit of // In test environments, we support loading+saving a JSON file. This is temporary, for the benefit of
// test_compatibility.py, so that we don't have to commit to making the database contents fully backward/forward // test_compatibility.py, so that we don't have to commit to making the database contents fully backward/forward
@@ -72,177 +65,122 @@ pub(crate) enum DatabaseError {
Query(#[from] diesel::result::Error), Query(#[from] diesel::result::Error),
#[error(transparent)] #[error(transparent)]
Connection(#[from] diesel::result::ConnectionError), Connection(#[from] diesel::result::ConnectionError),
#[error(transparent)]
ConnectionPool(#[from] r2d2::Error),
#[error("Logical error: {0}")] #[error("Logical error: {0}")]
Logical(String), Logical(String),
} }
#[derive(measured::FixedCardinalityLabel, Clone)]
pub(crate) enum DatabaseOperation {
InsertNode,
UpdateNode,
DeleteNode,
ListNodes,
BeginShardSplit,
CompleteShardSplit,
AbortShardSplit,
Detach,
ReAttach,
IncrementGeneration,
ListTenantShards,
InsertTenantShards,
UpdateTenantShard,
DeleteTenant,
UpdateTenantConfig,
}
#[must_use]
pub(crate) enum AbortShardSplitStatus {
/// We aborted the split in the database by reverting to the parent shards
Aborted,
/// The split had already been persisted.
Complete,
}
pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>; pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;
impl Persistence { impl Persistence {
// The default postgres connection limit is 100. We use up to 99, to leave one free for a human admin under
// normal circumstances. This assumes we have exclusive use of the database cluster to which we connect.
pub const MAX_CONNECTIONS: u32 = 99;
// We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
const MAX_CONNECTION_LIFETIME: Duration = Duration::from_secs(60);
pub fn new(database_url: String, json_path: Option<Utf8PathBuf>) -> Self { pub fn new(database_url: String, json_path: Option<Utf8PathBuf>) -> Self {
let manager = diesel::r2d2::ConnectionManager::<PgConnection>::new(database_url);
// We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
// to execute queries (database queries are not generally on latency-sensitive paths).
let connection_pool = diesel::r2d2::Pool::builder()
.max_size(Self::MAX_CONNECTIONS)
.max_lifetime(Some(Self::MAX_CONNECTION_LIFETIME))
.idle_timeout(Some(Self::IDLE_CONNECTION_TIMEOUT))
// Always keep at least one connection ready to go
.min_idle(Some(1))
.test_on_check_out(true)
.build(manager)
.expect("Could not build connection pool");
Self { Self {
connection_pool, database_url,
json_path, json_path,
} }
} }
/// Wraps `with_conn` in order to collect latency and error metrics
async fn with_measured_conn<F, R>(&self, op: DatabaseOperation, func: F) -> DatabaseResult<R>
where
F: Fn(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
R: Send + 'static,
{
let latency = &METRICS_REGISTRY
.metrics_group
.storage_controller_database_query_latency;
let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup {
operation: op.clone(),
});
let res = self.with_conn(func).await;
if let Err(err) = &res {
let error_counter = &METRICS_REGISTRY
.metrics_group
.storage_controller_database_query_error;
error_counter.inc(DatabaseQueryErrorLabelGroup {
error_type: err.error_label(),
operation: op,
})
}
res
}
/// Call the provided function in a tokio blocking thread, with a Diesel database connection. /// Call the provided function in a tokio blocking thread, with a Diesel database connection.
async fn with_conn<F, R>(&self, func: F) -> DatabaseResult<R> async fn with_conn<F, R>(&self, func: F) -> DatabaseResult<R>
where where
F: Fn(&mut PgConnection) -> DatabaseResult<R> + Send + 'static, F: Fn(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
R: Send + 'static, R: Send + 'static,
{ {
let mut conn = self.connection_pool.get()?; let database_url = self.database_url.clone();
tokio::task::spawn_blocking(move || -> DatabaseResult<R> { func(&mut conn) }) tokio::task::spawn_blocking(move || -> DatabaseResult<R> {
.await // TODO: connection pooling, such as via diesel::r2d2
.expect("Task panic") let mut conn = PgConnection::establish(&database_url)?;
func(&mut conn)
})
.await
.expect("Task panic")
} }
/// When a node is first registered, persist it before using it for anything /// When a node is first registered, persist it before using it for anything
pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> { pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
let np = node.to_persistent(); let np = node.to_persistent();
self.with_measured_conn( self.with_conn(move |conn| -> DatabaseResult<()> {
DatabaseOperation::InsertNode, diesel::insert_into(crate::schema::nodes::table)
move |conn| -> DatabaseResult<()> { .values(&np)
diesel::insert_into(crate::schema::nodes::table) .execute(conn)?;
.values(&np) Ok(())
.execute(conn)?; })
Ok(())
},
)
.await .await
} }
/// At startup, populate the list of nodes which our shards may be placed on /// At startup, populate the list of nodes which our shards may be placed on
pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> { pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<Node>> {
let nodes: Vec<NodePersistence> = self let nodes: Vec<Node> = self
.with_measured_conn( .with_conn(move |conn| -> DatabaseResult<_> {
DatabaseOperation::ListNodes, Ok(crate::schema::nodes::table
move |conn| -> DatabaseResult<_> { .load::<NodePersistence>(conn)?
Ok(crate::schema::nodes::table.load::<NodePersistence>(conn)?) .into_iter()
}, .map(|n| Node {
) id: NodeId(n.node_id as u64),
// At startup we consider a node offline until proven otherwise.
availability: NodeAvailability::Offline,
scheduling: NodeSchedulingPolicy::from_str(&n.scheduling_policy)
.expect("Bad scheduling policy in DB"),
listen_http_addr: n.listen_http_addr,
listen_http_port: n.listen_http_port as u16,
listen_pg_addr: n.listen_pg_addr,
listen_pg_port: n.listen_pg_port as u16,
})
.collect::<Vec<Node>>())
})
.await?; .await?;
if nodes.is_empty() {
return self.list_nodes_local_env().await;
}
tracing::info!("list_nodes: loaded {} nodes", nodes.len()); tracing::info!("list_nodes: loaded {} nodes", nodes.len());
Ok(nodes) Ok(nodes)
} }
pub(crate) async fn update_node( /// Shim for automated compatibility tests: load nodes from LocalEnv instead of database
&self, pub(crate) async fn list_nodes_local_env(&self) -> DatabaseResult<Vec<Node>> {
input_node_id: NodeId, // Enable test_backward_compatibility to work by populating our list of
input_scheduling: NodeSchedulingPolicy, // nodes from LocalEnv when it is not present in persistent storage. Otherwise at
) -> DatabaseResult<()> { // first startup in the compat test, we may have shards but no nodes.
use crate::schema::nodes::dsl::*; use control_plane::local_env::LocalEnv;
let updated = self let env = LocalEnv::load_config().map_err(|e| DatabaseError::Logical(format!("{e}")))?;
.with_measured_conn(DatabaseOperation::UpdateNode, move |conn| { tracing::info!(
let updated = diesel::update(nodes) "Loading {} pageserver nodes from LocalEnv",
.filter(node_id.eq(input_node_id.0 as i64)) env.pageservers.len()
.set((scheduling_policy.eq(String::from(input_scheduling)),)) );
.execute(conn)?; let mut nodes = Vec::new();
Ok(updated) for ps_conf in env.pageservers {
}) let (pg_host, pg_port) =
.await?; parse_host_port(&ps_conf.listen_pg_addr).expect("Unable to parse listen_pg_addr");
let (http_host, http_port) = parse_host_port(&ps_conf.listen_http_addr)
.expect("Unable to parse listen_http_addr");
let node = Node {
id: ps_conf.id,
listen_pg_addr: pg_host.to_string(),
listen_pg_port: pg_port.unwrap_or(5432),
listen_http_addr: http_host.to_string(),
listen_http_port: http_port.unwrap_or(80),
availability: NodeAvailability::Active,
scheduling: NodeSchedulingPolicy::Active,
};
if updated != 1 { // Synchronize database with what we learn from LocalEnv
Err(DatabaseError::Logical(format!( self.insert_node(&node).await?;
"Node {node_id:?} not found for update",
))) nodes.push(node);
} else {
Ok(())
} }
Ok(nodes)
} }
/// At startup, load the high level state for shards, such as their config + policy. This will /// At startup, load the high level state for shards, such as their config + policy. This will
/// be enriched at runtime with state discovered on pageservers. /// be enriched at runtime with state discovered on pageservers.
pub(crate) async fn list_tenant_shards(&self) -> DatabaseResult<Vec<TenantShardPersistence>> { pub(crate) async fn list_tenant_shards(&self) -> DatabaseResult<Vec<TenantShardPersistence>> {
let loaded = self let loaded = self
.with_measured_conn( .with_conn(move |conn| -> DatabaseResult<_> {
DatabaseOperation::ListTenantShards, Ok(crate::schema::tenant_shards::table.load::<TenantShardPersistence>(conn)?)
move |conn| -> DatabaseResult<_> { })
Ok(crate::schema::tenant_shards::table.load::<TenantShardPersistence>(conn)?)
},
)
.await?; .await?;
if loaded.is_empty() { if loaded.is_empty() {
@@ -270,10 +208,15 @@ impl Persistence {
let mut decoded = serde_json::from_slice::<JsonPersistence>(&bytes) let mut decoded = serde_json::from_slice::<JsonPersistence>(&bytes)
.map_err(|e| DatabaseError::Logical(format!("Deserialization error: {e}")))?; .map_err(|e| DatabaseError::Logical(format!("Deserialization error: {e}")))?;
for shard in decoded.tenants.values_mut() { for (tenant_id, tenant) in &mut decoded.tenants {
if shard.placement_policy == "\"Single\"" { // Backward compat: an old attachments.json from before PR #6251, replace
// Backward compat for test data after PR https://github.com/neondatabase/neon/pull/7165 // empty strings with proper defaults.
shard.placement_policy = "{\"Attached\":0}".to_string(); if tenant.tenant_id.is_empty() {
tenant.tenant_id = tenant_id.to_string();
tenant.config = serde_json::to_string(&TenantConfig::default())
.map_err(|e| DatabaseError::Logical(format!("Serialization error: {e}")))?;
tenant.placement_policy = serde_json::to_string(&PlacementPolicy::default())
.map_err(|e| DatabaseError::Logical(format!("Serialization error: {e}")))?;
} }
} }
@@ -297,7 +240,7 @@ impl Persistence {
let tenant_shard_id = TenantShardId { let tenant_shard_id = TenantShardId {
tenant_id: TenantId::from_str(tsp.tenant_id.as_str())?, tenant_id: TenantId::from_str(tsp.tenant_id.as_str())?,
shard_number: ShardNumber(tsp.shard_number as u8), shard_number: ShardNumber(tsp.shard_number as u8),
shard_count: ShardCount::new(tsp.shard_count as u8), shard_count: ShardCount(tsp.shard_count as u8),
}; };
tenants_map.insert(tenant_shard_id, tsp); tenants_map.insert(tenant_shard_id, tsp);
@@ -319,52 +262,32 @@ impl Persistence {
shards: Vec<TenantShardPersistence>, shards: Vec<TenantShardPersistence>,
) -> DatabaseResult<()> { ) -> DatabaseResult<()> {
use crate::schema::tenant_shards::dsl::*; use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn( self.with_conn(move |conn| -> DatabaseResult<()> {
DatabaseOperation::InsertTenantShards, conn.transaction(|conn| -> QueryResult<()> {
move |conn| -> DatabaseResult<()> { for tenant in &shards {
conn.transaction(|conn| -> QueryResult<()> { diesel::insert_into(tenant_shards)
for tenant in &shards { .values(tenant)
diesel::insert_into(tenant_shards) .execute(conn)?;
.values(tenant) }
.execute(conn)?;
}
Ok(())
})?;
Ok(()) Ok(())
}, })?;
) Ok(())
})
.await .await
} }
/// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
/// the tenant from memory on this server. /// the tenant from memory on this server.
#[allow(unused)]
pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> { pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
use crate::schema::tenant_shards::dsl::*; use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn( self.with_conn(move |conn| -> DatabaseResult<()> {
DatabaseOperation::DeleteTenant, diesel::delete(tenant_shards)
move |conn| -> DatabaseResult<()> { .filter(tenant_id.eq(del_tenant_id.to_string()))
diesel::delete(tenant_shards) .execute(conn)?;
.filter(tenant_id.eq(del_tenant_id.to_string()))
.execute(conn)?;
Ok(()) Ok(())
}, })
)
.await
}
pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
use crate::schema::nodes::dsl::*;
self.with_measured_conn(
DatabaseOperation::DeleteNode,
move |conn| -> DatabaseResult<()> {
diesel::delete(nodes)
.filter(node_id.eq(del_node_id.0 as i64))
.execute(conn)?;
Ok(())
},
)
.await .await
} }
@@ -378,7 +301,7 @@ impl Persistence {
) -> DatabaseResult<HashMap<TenantShardId, Generation>> { ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
use crate::schema::tenant_shards::dsl::*; use crate::schema::tenant_shards::dsl::*;
let updated = self let updated = self
.with_measured_conn(DatabaseOperation::ReAttach, move |conn| { .with_conn(move |conn| {
let rows_updated = diesel::update(tenant_shards) let rows_updated = diesel::update(tenant_shards)
.filter(generation_pageserver.eq(node_id.0 as i64)) .filter(generation_pageserver.eq(node_id.0 as i64))
.set(generation.eq(generation + 1)) .set(generation.eq(generation + 1))
@@ -402,17 +325,9 @@ impl Persistence {
tenant_id: TenantId::from_str(tsp.tenant_id.as_str()) tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
.map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?, .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
shard_number: ShardNumber(tsp.shard_number as u8), shard_number: ShardNumber(tsp.shard_number as u8),
shard_count: ShardCount::new(tsp.shard_count as u8), shard_count: ShardCount(tsp.shard_count as u8),
}; };
result.insert(tenant_shard_id, Generation::new(tsp.generation as u32));
let Some(g) = tsp.generation else {
// If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
// we only set generation_pageserver when setting generation.
return Err(DatabaseError::Logical(
"Generation should always be set after incrementing".to_string(),
));
};
result.insert(tenant_shard_id, Generation::new(g as u32));
} }
Ok(result) Ok(result)
@@ -428,11 +343,11 @@ impl Persistence {
) -> anyhow::Result<Generation> { ) -> anyhow::Result<Generation> {
use crate::schema::tenant_shards::dsl::*; use crate::schema::tenant_shards::dsl::*;
let updated = self let updated = self
.with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| { .with_conn(move |conn| {
let updated = diesel::update(tenant_shards) let updated = diesel::update(tenant_shards)
.filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string())) .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
.filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32)) .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
.filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32)) .filter(shard_count.eq(tenant_shard_id.shard_count.0 as i32))
.set(( .set((
generation.eq(generation + 1), generation.eq(generation + 1),
generation_pageserver.eq(node_id.0 as i64), generation_pageserver.eq(node_id.0 as i64),
@@ -445,96 +360,18 @@ impl Persistence {
}) })
.await?; .await?;
// Generation is always non-null in the result: if the generation column had been NULL, then we Ok(Generation::new(updated.generation as u32))
// should have experienced an SQL Conflict error while executing a query that tries to increment it.
debug_assert!(updated.generation.is_some());
let Some(g) = updated.generation else {
return Err(DatabaseError::Logical(
"Generation should always be set after incrementing".to_string(),
)
.into());
};
Ok(Generation::new(g as u32))
}
/// For use when updating a persistent property of a tenant, such as its config or placement_policy.
///
/// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
/// API: use [`Self::increment_generation`] instead. Setting the generation via this route is a one-time thing
/// that we only do the first time a tenant is set to an attached policy via /location_config.
pub(crate) async fn update_tenant_shard(
&self,
tenant_shard_id: TenantShardId,
input_placement_policy: PlacementPolicy,
input_config: TenantConfig,
input_generation: Option<Generation>,
) -> DatabaseResult<()> {
use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
let query = diesel::update(tenant_shards)
.filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
.filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
.filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32));
if let Some(input_generation) = input_generation {
// Update includes generation column
query
.set((
generation.eq(Some(input_generation.into().unwrap() as i32)),
placement_policy
.eq(serde_json::to_string(&input_placement_policy).unwrap()),
config.eq(serde_json::to_string(&input_config).unwrap()),
))
.execute(conn)?;
} else {
// Update does not include generation column
query
.set((
placement_policy
.eq(serde_json::to_string(&input_placement_policy).unwrap()),
config.eq(serde_json::to_string(&input_config).unwrap()),
))
.execute(conn)?;
}
Ok(())
})
.await?;
Ok(())
}
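    // Illustrative sketch (comments only, not part of this change): a typical config-only
    // update leaves the generation column untouched by passing `None`, e.g.
    //
    //     persistence
    //         .update_tenant_shard(
    //             tenant_shard_id,
    //             PlacementPolicy::Attached(0),
    //             TenantConfig::default(),
    //             None, // do not touch the generation
    //         )
    //         .await?;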
pub(crate) async fn update_tenant_config(
&self,
input_tenant_id: TenantId,
input_config: TenantConfig,
) -> DatabaseResult<()> {
use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn(DatabaseOperation::UpdateTenantConfig, move |conn| {
diesel::update(tenant_shards)
.filter(tenant_id.eq(input_tenant_id.to_string()))
.set((config.eq(serde_json::to_string(&input_config).unwrap()),))
.execute(conn)?;
Ok(())
})
.await?;
Ok(())
} }
pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> { pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
use crate::schema::tenant_shards::dsl::*; use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn(DatabaseOperation::Detach, move |conn| { self.with_conn(move |conn| {
let updated = diesel::update(tenant_shards) let updated = diesel::update(tenant_shards)
.filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string())) .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
.filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32)) .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
.filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32)) .filter(shard_count.eq(tenant_shard_id.shard_count.0 as i32))
.set(( .set((
generation_pageserver.eq(Option::<i64>::None), generation_pageserver.eq(i64::MAX),
placement_policy.eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()), placement_policy.eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
)) ))
.execute(conn)?; .execute(conn)?;
@@ -546,160 +383,24 @@ impl Persistence {
Ok(()) Ok(())
} }
// When we start shard splitting, we must durably mark the tenant so that // TODO: when we start shard splitting, we must durably mark the tenant so that
// on restart, we know that we must go through recovery. // on restart, we know that we must go through recovery (list shards that exist
// // and pick up where we left off and/or revert to parent shards).
// We create the child shards here, so that they will be available for increment_generation calls #[allow(dead_code)]
// if some pageserver holding a child shard needs to restart before the overall tenant split is complete. pub(crate) async fn begin_shard_split(&self, _tenant_id: TenantId) -> anyhow::Result<()> {
pub(crate) async fn begin_shard_split( todo!();
&self,
old_shard_count: ShardCount,
split_tenant_id: TenantId,
parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
) -> DatabaseResult<()> {
use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| -> DatabaseResult<()> {
conn.transaction(|conn| -> DatabaseResult<()> {
// Mark parent shards as splitting
let updated = diesel::update(tenant_shards)
.filter(tenant_id.eq(split_tenant_id.to_string()))
.filter(shard_count.eq(old_shard_count.literal() as i32))
.set((splitting.eq(1),))
.execute(conn)?;
if u8::try_from(updated)
.map_err(|_| DatabaseError::Logical(
format!("Overflow existing shard count {} while splitting", updated))
)? != old_shard_count.count() {
// Perhaps a deletion or another split raced with this attempt to split, mutating
// the parent shards that we intend to split. In this case the split request should fail.
return Err(DatabaseError::Logical(
format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
));
}
// FIXME: spurious clone to sidestep closure move rules
let parent_to_children = parent_to_children.clone();
// Insert child shards
for (parent_shard_id, children) in parent_to_children {
let mut parent = crate::schema::tenant_shards::table
.filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
.filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
.filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
.load::<TenantShardPersistence>(conn)?;
let parent = if parent.len() != 1 {
return Err(DatabaseError::Logical(format!(
"Parent shard {parent_shard_id} not found"
)));
} else {
parent.pop().unwrap()
};
for mut shard in children {
// Carry the parent's generation into the child
shard.generation = parent.generation;
debug_assert!(shard.splitting == SplitState::Splitting);
diesel::insert_into(tenant_shards)
.values(shard)
.execute(conn)?;
}
}
Ok(())
})?;
Ok(())
})
.await
} }
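    // Note, not part of this change: for example, splitting a tenant from ShardCount(2) to
    // ShardCount(4) marks the two parent rows with splitting=1 and inserts the four child rows,
    // each child carrying its parent's generation, all within the single transaction above.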
// When we finish shard splitting, we must atomically clean up the old shards // TODO: when we finish shard splitting, we must atomically clean up the old shards
// and insert the new shards, and clear the splitting marker. // and insert the new shards, and clear the splitting marker.
pub(crate) async fn complete_shard_split( #[allow(dead_code)]
&self, pub(crate) async fn complete_shard_split(&self, _tenant_id: TenantId) -> anyhow::Result<()> {
split_tenant_id: TenantId, todo!();
old_shard_count: ShardCount,
) -> DatabaseResult<()> {
use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn(
DatabaseOperation::CompleteShardSplit,
move |conn| -> DatabaseResult<()> {
conn.transaction(|conn| -> QueryResult<()> {
// Drop parent shards
diesel::delete(tenant_shards)
.filter(tenant_id.eq(split_tenant_id.to_string()))
.filter(shard_count.eq(old_shard_count.literal() as i32))
.execute(conn)?;
// Clear sharding flag
let updated = diesel::update(tenant_shards)
.filter(tenant_id.eq(split_tenant_id.to_string()))
.set((splitting.eq(0),))
.execute(conn)?;
debug_assert!(updated > 0);
Ok(())
})?;
Ok(())
},
)
.await
}
/// Used when the remote part of a shard split failed: we will revert the database state to have only
/// the parent shards, with SplitState::Idle.
pub(crate) async fn abort_shard_split(
&self,
split_tenant_id: TenantId,
new_shard_count: ShardCount,
) -> DatabaseResult<AbortShardSplitStatus> {
use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn(
DatabaseOperation::AbortShardSplit,
move |conn| -> DatabaseResult<AbortShardSplitStatus> {
let aborted =
conn.transaction(|conn| -> DatabaseResult<AbortShardSplitStatus> {
// Clear the splitting state on parent shards
let updated = diesel::update(tenant_shards)
.filter(tenant_id.eq(split_tenant_id.to_string()))
.filter(shard_count.ne(new_shard_count.literal() as i32))
.set((splitting.eq(0),))
.execute(conn)?;
// Parent shards are already gone: we cannot abort.
if updated == 0 {
return Ok(AbortShardSplitStatus::Complete);
}
// Sanity check: if parent shards were present, their cardinality should
// be less than the number of child shards.
if updated >= new_shard_count.count() as usize {
return Err(DatabaseError::Logical(format!(
"Unexpected parent shard count {updated} while aborting split to \
count {new_shard_count:?} on tenant {split_tenant_id}"
)));
}
// Erase child shards
diesel::delete(tenant_shards)
.filter(tenant_id.eq(split_tenant_id.to_string()))
.filter(shard_count.eq(new_shard_count.literal() as i32))
.execute(conn)?;
Ok(AbortShardSplitStatus::Aborted)
})?;
Ok(aborted)
},
)
.await
} }
} }
/// Parts of [`crate::tenant_state::TenantState`] that are stored durably /// Parts of [`crate::tenant_state::TenantState`] that are stored durably
#[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)] #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone)]
#[diesel(table_name = crate::schema::tenant_shards)] #[diesel(table_name = crate::schema::tenant_shards)]
pub(crate) struct TenantShardPersistence { pub(crate) struct TenantShardPersistence {
#[serde(default)] #[serde(default)]
@@ -712,48 +413,21 @@ pub(crate) struct TenantShardPersistence {
pub(crate) shard_stripe_size: i32, pub(crate) shard_stripe_size: i32,
// Latest generation number: next time we attach, increment this // Latest generation number: next time we attach, increment this
// and use the incremented number when attaching. // and use the incremented number when attaching
// pub(crate) generation: i32,
// Generation is only None when first onboarding a tenant, where it may
// be in PlacementPolicy::Secondary and therefore have no valid generation state.
pub(crate) generation: Option<i32>,
// Currently attached pageserver // Currently attached pageserver
#[serde(rename = "pageserver")] #[serde(rename = "pageserver")]
pub(crate) generation_pageserver: Option<i64>, pub(crate) generation_pageserver: i64,
#[serde(default)] #[serde(default)]
pub(crate) placement_policy: String, pub(crate) placement_policy: String,
#[serde(default)] #[serde(default)]
pub(crate) splitting: SplitState,
#[serde(default)]
pub(crate) config: String, pub(crate) config: String,
} }
impl TenantShardPersistence {
pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
if self.shard_count == 0 {
Ok(ShardIdentity::unsharded())
} else {
Ok(ShardIdentity::new(
ShardNumber(self.shard_number as u8),
ShardCount::new(self.shard_count as u8),
ShardStripeSize(self.shard_stripe_size as u32),
)?)
}
}
pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
Ok(TenantShardId {
tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
shard_number: ShardNumber(self.shard_number as u8),
shard_count: ShardCount::new(self.shard_count as u8),
})
}
}
/// Parts of [`crate::node::Node`] that are stored durably /// Parts of [`crate::node::Node`] that are stored durably
#[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)] #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable)]
#[diesel(table_name = crate::schema::nodes)] #[diesel(table_name = crate::schema::nodes)]
pub(crate) struct NodePersistence { pub(crate) struct NodePersistence {
pub(crate) node_id: i64, pub(crate) node_id: i64,

View File

@@ -1,46 +0,0 @@
use diesel::pg::{Pg, PgValue};
use diesel::{
deserialize::FromSql, deserialize::FromSqlRow, expression::AsExpression, serialize::ToSql,
sql_types::Int2,
};
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, FromSqlRow, AsExpression)]
#[diesel(sql_type = SplitStateSQLRepr)]
#[derive(Deserialize, Serialize)]
pub enum SplitState {
Idle = 0,
Splitting = 1,
}
impl Default for SplitState {
fn default() -> Self {
Self::Idle
}
}
type SplitStateSQLRepr = Int2;
impl ToSql<SplitStateSQLRepr, Pg> for SplitState {
fn to_sql<'a>(
&'a self,
out: &'a mut diesel::serialize::Output<Pg>,
) -> diesel::serialize::Result {
let raw_value: i16 = *self as i16;
let mut new_out = out.reborrow();
ToSql::<SplitStateSQLRepr, Pg>::to_sql(&raw_value, &mut new_out)
}
}
impl FromSql<SplitStateSQLRepr, Pg> for SplitState {
fn from_sql(pg_value: PgValue) -> diesel::deserialize::Result<Self> {
match FromSql::<SplitStateSQLRepr, Pg>::from_sql(pg_value).map(|v| match v {
0 => Some(Self::Idle),
1 => Some(Self::Splitting),
_ => None,
})? {
Some(v) => Ok(v),
None => Err(format!("Invalid SplitState value, was: {:?}", pg_value.as_bytes()).into()),
}
}
}
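// Note, not part of this change: the net effect is that the `splitting` column stores an
// Int2 of 0 for Idle and 1 for Splitting, and any other stored value is rejected at load
// time with the error above.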

View File

@@ -1,7 +1,6 @@
use crate::pageserver_client::PageserverClient;
use crate::persistence::Persistence; use crate::persistence::Persistence;
use crate::service; use crate::service;
use hyper::StatusCode; use control_plane::attachment_service::NodeAvailability;
use pageserver_api::models::{ use pageserver_api::models::{
LocationConfig, LocationConfigMode, LocationConfigSecondary, TenantConfig, LocationConfig, LocationConfigMode, LocationConfigSecondary, TenantConfig,
}; };
@@ -9,19 +8,16 @@ use pageserver_api::shard::{ShardIdentity, TenantShardId};
use pageserver_client::mgmt_api; use pageserver_client::mgmt_api;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::Duration;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
use utils::generation::Generation; use utils::generation::Generation;
use utils::id::{NodeId, TimelineId}; use utils::id::{NodeId, TimelineId};
use utils::lsn::Lsn; use utils::lsn::Lsn;
use utils::sync::gate::GateGuard;
use crate::compute_hook::{ComputeHook, NotifyError}; use crate::compute_hook::ComputeHook;
use crate::node::Node; use crate::node::Node;
use crate::tenant_state::{IntentState, ObservedState, ObservedStateLocation}; use crate::tenant_state::{IntentState, ObservedState, ObservedStateLocation};
const DEFAULT_HEATMAP_PERIOD: &str = "60s";
/// Object with the lifetime of the background reconcile task that is created /// Object with the lifetime of the background reconcile task that is created
/// for tenants which have a difference between their intent and observed states. /// for tenants which have a difference between their intent and observed states.
pub(super) struct Reconciler { pub(super) struct Reconciler {
@@ -29,27 +25,20 @@ pub(super) struct Reconciler {
/// of a tenant's state from when we spawned a reconcile task. /// of a tenant's state from when we spawned a reconcile task.
pub(super) tenant_shard_id: TenantShardId, pub(super) tenant_shard_id: TenantShardId,
pub(crate) shard: ShardIdentity, pub(crate) shard: ShardIdentity,
pub(crate) generation: Option<Generation>, pub(crate) generation: Generation,
pub(crate) intent: TargetState, pub(crate) intent: IntentState,
/// Nodes not referenced by [`Self::intent`], from which we should try
/// to detach this tenant shard.
pub(crate) detach: Vec<Node>,
pub(crate) config: TenantConfig, pub(crate) config: TenantConfig,
pub(crate) observed: ObservedState, pub(crate) observed: ObservedState,
pub(crate) service_config: service::Config, pub(crate) service_config: service::Config,
/// A hook to notify the running postgres instances when we change the location /// A snapshot of the pageservers as they were when we were asked
/// of a tenant. Use this via [`Self::compute_notify`] to update our failure flag /// to reconcile.
/// and guarantee eventual retries. pub(crate) pageservers: Arc<HashMap<NodeId, Node>>,
pub(crate) compute_hook: Arc<ComputeHook>,
/// To avoid stalling if the cloud control plane is unavailable, we may proceed /// A hook to notify the running postgres instances when we change the location
/// past failures in [`ComputeHook::notify`], but we _must_ remember that we failed /// of a tenant
/// so that we can set [`crate::tenant_state::TenantState::pending_compute_notification`] to ensure a later retry. pub(crate) compute_hook: Arc<ComputeHook>,
pub(crate) compute_notify_failure: bool,
/// A means to abort background reconciliation: it is essential to /// A means to abort background reconciliation: it is essential to
/// call this when something changes in the original TenantState that /// call this when something changes in the original TenantState that
@@ -58,54 +47,12 @@ pub(super) struct Reconciler {
/// the tenant is changed. /// the tenant is changed.
pub(crate) cancel: CancellationToken, pub(crate) cancel: CancellationToken,
/// Reconcilers are registered with a Gate so that during a graceful shutdown we
/// can wait for all the reconcilers to respond to their cancellation tokens.
pub(crate) _gate_guard: GateGuard,
/// Access to persistent storage for updating generation numbers /// Access to persistent storage for updating generation numbers
pub(crate) persistence: Arc<Persistence>, pub(crate) persistence: Arc<Persistence>,
} }
/// This is a snapshot of [`crate::tenant_state::IntentState`], but it does not do any
/// reference counting for Scheduler. The IntentState is what the scheduler works with,
/// and the TargetState is just the instruction for a particular Reconciler run.
#[derive(Debug)]
pub(crate) struct TargetState {
pub(crate) attached: Option<Node>,
pub(crate) secondary: Vec<Node>,
}
impl TargetState {
pub(crate) fn from_intent(nodes: &HashMap<NodeId, Node>, intent: &IntentState) -> Self {
Self {
attached: intent.get_attached().map(|n| {
nodes
.get(&n)
.expect("Intent attached referenced non-existent node")
.clone()
}),
secondary: intent
.get_secondary()
.iter()
.map(|n| {
nodes
.get(n)
.expect("Intent secondary referenced non-existent node")
.clone()
})
.collect(),
}
}
}
#[derive(thiserror::Error, Debug)] #[derive(thiserror::Error, Debug)]
pub(crate) enum ReconcileError { pub enum ReconcileError {
#[error(transparent)]
Remote(#[from] mgmt_api::Error),
#[error(transparent)]
Notify(#[from] NotifyError),
#[error("Cancelled")]
Cancel,
#[error(transparent)] #[error(transparent)]
Other(#[from] anyhow::Error), Other(#[from] anyhow::Error),
} }
@@ -113,99 +60,44 @@ pub(crate) enum ReconcileError {
impl Reconciler { impl Reconciler {
async fn location_config( async fn location_config(
&mut self, &mut self,
node: &Node, node_id: NodeId,
config: LocationConfig, config: LocationConfig,
flush_ms: Option<Duration>, flush_ms: Option<Duration>,
lazy: bool, ) -> anyhow::Result<()> {
) -> Result<(), ReconcileError> { let node = self
if !node.is_available() && config.mode == LocationConfigMode::Detached { .pageservers
// Attempts to detach from offline nodes may be imitated without doing I/O: a node which is offline .get(&node_id)
// will get fully reconciled wrt the shard's intent state when it is reactivated, irrespective of .expect("Pageserver may not be removed while referenced");
// what we put into `observed`, in [`crate::service::Service::node_activate_reconcile`]
tracing::info!("Node {node} is unavailable during detach: proceeding anyway, it will be detached on next activation");
self.observed.locations.remove(&node.get_id());
return Ok(());
}
self.observed self.observed
.locations .locations
.insert(node.get_id(), ObservedStateLocation { conf: None }); .insert(node.id, ObservedStateLocation { conf: None });
// TODO: amend locations that use long-polling: they will hit this timeout. tracing::info!("location_config({}) calling: {:?}", node_id, config);
let timeout = Duration::from_secs(25); let client =
mgmt_api::Client::new(node.base_url(), self.service_config.jwt_token.as_deref());
client
.location_config(self.tenant_shard_id, config.clone(), flush_ms)
.await?;
tracing::info!("location_config({}) complete: {:?}", node_id, config);
tracing::info!("location_config({node}) calling: {:?}", config); self.observed
let tenant_shard_id = self.tenant_shard_id; .locations
let config_ref = &config; .insert(node.id, ObservedStateLocation { conf: Some(config) });
match node
.with_client_retries(
|client| async move {
let config = config_ref.clone();
client
.location_config(tenant_shard_id, config.clone(), flush_ms, lazy)
.await
},
&self.service_config.jwt_token,
1,
3,
timeout,
&self.cancel,
)
.await
{
Some(Ok(_)) => {}
Some(Err(e)) => return Err(e.into()),
None => return Err(ReconcileError::Cancel),
};
tracing::info!("location_config({node}) complete: {:?}", config);
match config.mode {
LocationConfigMode::Detached => {
self.observed.locations.remove(&node.get_id());
}
_ => {
self.observed
.locations
.insert(node.get_id(), ObservedStateLocation { conf: Some(config) });
}
}
Ok(()) Ok(())
} }
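
`location_config` brackets the remote call with updates to the observed state: the entry is first set to `None` ("we may have state here but do not know what it is"), and only after a successful response is it replaced with the applied config, or removed entirely for a detach. Here is a sketch of that bracketing pattern, using placeholder types and a stubbed remote call in place of the real pageserver API.

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum Mode { AttachedSingle, Detached }

#[derive(Clone, Debug, PartialEq)]
struct LocationConfig { mode: Mode, generation: Option<u32> }

/// `None` means "we may have state on this node but don't know what it is".
type Observed = HashMap<u64, Option<LocationConfig>>;

// Placeholder for the real HTTP call to the pageserver.
fn put_location_config(_node: u64, _conf: &LocationConfig) -> Result<(), String> {
    Ok(())
}

fn apply_location_config(
    observed: &mut Observed,
    node: u64,
    conf: LocationConfig,
) -> Result<(), String> {
    // 1. Mark the location as "uncertain" before mutating remote state, so a
    //    failure partway through is remembered as unknown rather than stale.
    observed.insert(node, None);

    // 2. Perform the remote call.
    put_location_config(node, &conf)?;

    // 3. Record what we now know to be true: either the applied config, or
    //    nothing at all if we just detached the location.
    if conf.mode == Mode::Detached {
        observed.remove(&node);
    } else {
        observed.insert(node, Some(conf));
    }
    Ok(())
}

fn main() {
    let mut observed = Observed::new();
    let conf = LocationConfig { mode: Mode::AttachedSingle, generation: Some(3) };
    apply_location_config(&mut observed, 1, conf).unwrap();
    println!("{observed:?}");
}
```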
fn get_node(&self, node_id: &NodeId) -> Option<&Node> {
if let Some(node) = self.intent.attached.as_ref() {
if node.get_id() == *node_id {
return Some(node);
}
}
if let Some(node) = self
.intent
.secondary
.iter()
.find(|n| n.get_id() == *node_id)
{
return Some(node);
}
if let Some(node) = self.detach.iter().find(|n| n.get_id() == *node_id) {
return Some(node);
}
None
}
async fn maybe_live_migrate(&mut self) -> Result<(), ReconcileError> { async fn maybe_live_migrate(&mut self) -> Result<(), ReconcileError> {
let destination = if let Some(node) = &self.intent.attached { let destination = if let Some(node_id) = self.intent.attached {
match self.observed.locations.get(&node.get_id()) { match self.observed.locations.get(&node_id) {
Some(conf) => { Some(conf) => {
// We will do a live migration only if the intended destination is not // We will do a live migration only if the intended destination is not
// currently in an attached state. // currently in an attached state.
match &conf.conf { match &conf.conf {
Some(conf) if conf.mode == LocationConfigMode::Secondary => { Some(conf) if conf.mode == LocationConfigMode::Secondary => {
// Fall through to do a live migration // Fall through to do a live migration
node node_id
} }
None | Some(_) => { None | Some(_) => {
// Attached or uncertain: don't do a live migration, proceed // Attached or uncertain: don't do a live migration, proceed
@@ -218,7 +110,7 @@ impl Reconciler {
None => { None => {
// Our destination is not attached: maybe live migrate if some other // Our destination is not attached: maybe live migrate if some other
// node is currently attached. Fall through. // node is currently attached. Fall through.
node node_id
} }
} }
} else { } else {
@@ -231,13 +123,15 @@ impl Reconciler {
for (node_id, state) in &self.observed.locations { for (node_id, state) in &self.observed.locations {
if let Some(observed_conf) = &state.conf { if let Some(observed_conf) = &state.conf {
if observed_conf.mode == LocationConfigMode::AttachedSingle { if observed_conf.mode == LocationConfigMode::AttachedSingle {
let node = self
.pageservers
.get(node_id)
.expect("Nodes may not be removed while referenced");
// We will only attempt live migration if the origin is not offline: this // We will only attempt live migration if the origin is not offline: this
// avoids trying to do it while reconciling after responding to an HA failover. // avoids trying to do it while reconciling after responding to an HA failover.
if let Some(node) = self.get_node(node_id) { if !matches!(node.availability, NodeAvailability::Offline) {
if node.is_available() { origin = Some(*node_id);
origin = Some(node.clone()); break;
break;
}
} }
} }
} }
@@ -250,7 +144,7 @@ impl Reconciler {
// We have an origin and a destination: proceed to do the live migration // We have an origin and a destination: proceed to do the live migration
tracing::info!("Live migrating {}->{}", origin, destination); tracing::info!("Live migrating {}->{}", origin, destination);
self.live_migrate(origin, destination.clone()).await?; self.live_migrate(origin, destination).await?;
Ok(()) Ok(())
} }
@@ -258,13 +152,15 @@ impl Reconciler {
async fn get_lsns( async fn get_lsns(
&self, &self,
tenant_shard_id: TenantShardId, tenant_shard_id: TenantShardId,
node: &Node, node_id: &NodeId,
) -> anyhow::Result<HashMap<TimelineId, Lsn>> { ) -> anyhow::Result<HashMap<TimelineId, Lsn>> {
let client = PageserverClient::new( let node = self
node.get_id(), .pageservers
node.base_url(), .get(node_id)
self.service_config.jwt_token.as_deref(), .expect("Pageserver may not be removed while referenced");
);
let client =
mgmt_api::Client::new(node.base_url(), self.service_config.jwt_token.as_deref());
let timelines = client.timeline_list(&tenant_shard_id).await?; let timelines = client.timeline_list(&tenant_shard_id).await?;
Ok(timelines Ok(timelines
@@ -273,86 +169,19 @@ impl Reconciler {
.collect()) .collect())
} }
async fn secondary_download( async fn secondary_download(&self, tenant_shard_id: TenantShardId, node_id: &NodeId) {
&self, let node = self
tenant_shard_id: TenantShardId, .pageservers
node: &Node, .get(node_id)
) -> Result<(), ReconcileError> { .expect("Pageserver may not be removed while referenced");
// This is not the timeout for a request, but the total amount of time we're willing to wait
// for a secondary location to get up to date before we give up and proceed with the migration anyway
const TOTAL_DOWNLOAD_TIMEOUT: Duration = Duration::from_secs(300);
// This the long-polling interval for the secondary download requests we send to destination pageserver let client =
// during a migration. mgmt_api::Client::new(node.base_url(), self.service_config.jwt_token.as_deref());
const REQUEST_DOWNLOAD_TIMEOUT: Duration = Duration::from_secs(20);
let started_at = Instant::now(); match client.tenant_secondary_download(tenant_shard_id).await {
Ok(()) => {}
loop { Err(_) => {
let (status, progress) = match node tracing::info!(" (skipping, destination wasn't in secondary mode)")
.with_client_retries(
|client| async move {
client
.tenant_secondary_download(
tenant_shard_id,
Some(REQUEST_DOWNLOAD_TIMEOUT),
)
.await
},
&self.service_config.jwt_token,
1,
3,
REQUEST_DOWNLOAD_TIMEOUT * 2,
&self.cancel,
)
.await
{
None => Err(ReconcileError::Cancel),
Some(Ok(v)) => Ok(v),
Some(Err(e)) => {
// Give up, but proceed: it's unfortunate if we couldn't freshen the destination before
// attaching, but we should not let an issue with a secondary location stop us proceeding
// with a live migration.
tracing::warn!("Failed to prepare by downloading layers on node {node}: {e})");
return Ok(());
}
}?;
if status == StatusCode::OK {
tracing::info!(
"Downloads to {} complete: {}/{} layers, {}/{} bytes",
node,
progress.layers_downloaded,
progress.layers_total,
progress.bytes_downloaded,
progress.bytes_total
);
return Ok(());
} else if status == StatusCode::ACCEPTED {
let total_runtime = started_at.elapsed();
if total_runtime > TOTAL_DOWNLOAD_TIMEOUT {
tracing::warn!("Timed out after {}ms downloading layers to {node}. Progress so far: {}/{} layers, {}/{} bytes",
total_runtime.as_millis(),
progress.layers_downloaded,
progress.layers_total,
progress.bytes_downloaded,
progress.bytes_total
);
// Give up, but proceed: an incompletely warmed destination doesn't prevent migration working,
// it just makes the I/O performance for users less good.
return Ok(());
}
// Log and proceed around the loop to retry. We don't sleep between requests, because our HTTP call
// to the pageserver is a long-poll.
tracing::info!(
"Downloads to {} not yet complete: {}/{} layers, {}/{} bytes",
node,
progress.layers_downloaded,
progress.layers_total,
progress.bytes_downloaded,
progress.bytes_total
);
} }
} }
} }
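
`secondary_download` distinguishes two timeouts: the long-poll duration of each request to the destination, and a total budget after which warm-up is abandoned and the migration proceeds with whatever has been downloaded. A sketch of that loop under the same assumptions; `request_secondary_download` below is a stub, not the real client call.

```rust
use std::time::{Duration, Instant};

#[allow(dead_code)]
#[derive(Debug)]
enum DownloadStatus {
    Complete { layers: u64 },
    InProgress { layers_downloaded: u64, layers_total: u64 },
}

// Placeholder for the real long-polling request to the destination pageserver:
// it returns when downloads finish or after the server-side wait expires.
fn request_secondary_download(_wait: Duration) -> Result<DownloadStatus, String> {
    Ok(DownloadStatus::Complete { layers: 42 })
}

/// Warm up a secondary location, but never block a migration indefinitely:
/// after TOTAL_TIMEOUT we log and proceed with whatever has been downloaded.
fn warm_up_secondary() {
    const REQUEST_TIMEOUT: Duration = Duration::from_secs(20);
    const TOTAL_TIMEOUT: Duration = Duration::from_secs(300);
    let started = Instant::now();

    loop {
        match request_secondary_download(REQUEST_TIMEOUT) {
            Err(e) => {
                // A secondary warm-up failure should not block the migration.
                eprintln!("warm-up failed ({e}), proceeding anyway");
                return;
            }
            Ok(DownloadStatus::Complete { layers }) => {
                println!("downloads complete: {layers} layers");
                return;
            }
            Ok(DownloadStatus::InProgress { layers_downloaded, layers_total }) => {
                if started.elapsed() > TOTAL_TIMEOUT {
                    eprintln!(
                        "timed out warming secondary ({layers_downloaded}/{layers_total}), proceeding"
                    );
                    return;
                }
                // No sleep here: the request itself is a long poll.
                println!("still downloading: {layers_downloaded}/{layers_total}");
            }
        }
    }
}

fn main() {
    warm_up_secondary();
}
```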
@@ -360,14 +189,17 @@ impl Reconciler {
async fn await_lsn( async fn await_lsn(
&self, &self,
tenant_shard_id: TenantShardId, tenant_shard_id: TenantShardId,
node: &Node, pageserver_id: &NodeId,
baseline: HashMap<TimelineId, Lsn>, baseline: HashMap<TimelineId, Lsn>,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
loop { loop {
let latest = match self.get_lsns(tenant_shard_id, node).await { let latest = match self.get_lsns(tenant_shard_id, pageserver_id).await {
Ok(l) => l, Ok(l) => l,
Err(e) => { Err(e) => {
tracing::info!("🕑 Can't get LSNs on node {node} yet, waiting ({e})",); println!(
"🕑 Can't get LSNs on pageserver {} yet, waiting ({e})",
pageserver_id
);
std::thread::sleep(Duration::from_millis(500)); std::thread::sleep(Duration::from_millis(500));
continue; continue;
} }
@@ -377,7 +209,7 @@ impl Reconciler {
for (timeline_id, baseline_lsn) in &baseline { for (timeline_id, baseline_lsn) in &baseline {
match latest.get(timeline_id) { match latest.get(timeline_id) {
Some(latest_lsn) => { Some(latest_lsn) => {
tracing::info!("🕑 LSN origin {baseline_lsn} vs destination {latest_lsn}"); println!("🕑 LSN origin {baseline_lsn} vs destination {latest_lsn}");
if latest_lsn < baseline_lsn { if latest_lsn < baseline_lsn {
any_behind = true; any_behind = true;
} }
@@ -392,7 +224,7 @@ impl Reconciler {
} }
if !any_behind { if !any_behind {
tracing::info!("✅ LSN caught up. Proceeding..."); println!("✅ LSN caught up. Proceeding...");
break; break;
} else { } else {
std::thread::sleep(Duration::from_millis(500)); std::thread::sleep(Duration::from_millis(500));
@@ -404,11 +236,11 @@ impl Reconciler {
pub async fn live_migrate( pub async fn live_migrate(
&mut self, &mut self,
origin_ps: Node, origin_ps_id: NodeId,
dest_ps: Node, dest_ps_id: NodeId,
) -> Result<(), ReconcileError> { ) -> anyhow::Result<()> {
// `maybe_live_migrate` is responsible for sanity of inputs // `maybe_live_migrate` is responsible for sanity of inputs
assert!(origin_ps.get_id() != dest_ps.get_id()); assert!(origin_ps_id != dest_ps_id);
fn build_location_config( fn build_location_config(
shard: &ShardIdentity, shard: &ShardIdentity,
@@ -423,12 +255,15 @@ impl Reconciler {
secondary_conf, secondary_conf,
tenant_conf: config.clone(), tenant_conf: config.clone(),
shard_number: shard.number.0, shard_number: shard.number.0,
shard_count: shard.count.literal(), shard_count: shard.count.0,
shard_stripe_size: shard.stripe_size.0, shard_stripe_size: shard.stripe_size.0,
} }
} }
tracing::info!("🔁 Switching origin node {origin_ps} to stale mode",); tracing::info!(
"🔁 Switching origin pageserver {} to stale mode",
origin_ps_id
);
// FIXME: it is incorrect to use self.generation here, we should use the generation // FIXME: it is incorrect to use self.generation here, we should use the generation
// from the ObservedState of the origin pageserver (it might be older than self.generation) // from the ObservedState of the origin pageserver (it might be older than self.generation)
@@ -436,66 +271,57 @@ impl Reconciler {
&self.shard, &self.shard,
&self.config, &self.config,
LocationConfigMode::AttachedStale, LocationConfigMode::AttachedStale,
self.generation, Some(self.generation),
None, None,
); );
self.location_config(&origin_ps, stale_conf, Some(Duration::from_secs(10)), false) self.location_config(origin_ps_id, stale_conf, Some(Duration::from_secs(10)))
.await?; .await?;
let baseline_lsns = Some(self.get_lsns(self.tenant_shard_id, &origin_ps).await?); let baseline_lsns = Some(self.get_lsns(self.tenant_shard_id, &origin_ps_id).await?);
// If we are migrating to a destination that has a secondary location, warm it up first // If we are migrating to a destination that has a secondary location, warm it up first
if let Some(destination_conf) = self.observed.locations.get(&dest_ps.get_id()) { if let Some(destination_conf) = self.observed.locations.get(&dest_ps_id) {
if let Some(destination_conf) = &destination_conf.conf { if let Some(destination_conf) = &destination_conf.conf {
if destination_conf.mode == LocationConfigMode::Secondary { if destination_conf.mode == LocationConfigMode::Secondary {
tracing::info!("🔁 Downloading latest layers to destination node {dest_ps}",); tracing::info!(
self.secondary_download(self.tenant_shard_id, &dest_ps) "🔁 Downloading latest layers to destination pageserver {}",
.await?; dest_ps_id,
);
self.secondary_download(self.tenant_shard_id, &dest_ps_id)
.await;
} }
} }
} }
// Increment generation before attaching to new pageserver // Increment generation before attaching to new pageserver
self.generation = Some( self.generation = self
self.persistence .persistence
.increment_generation(self.tenant_shard_id, dest_ps.get_id()) .increment_generation(self.tenant_shard_id, dest_ps_id)
.await?, .await?;
);
let dest_conf = build_location_config( let dest_conf = build_location_config(
&self.shard, &self.shard,
&self.config, &self.config,
LocationConfigMode::AttachedMulti, LocationConfigMode::AttachedMulti,
self.generation, Some(self.generation),
None, None,
); );
tracing::info!("🔁 Attaching to pageserver {dest_ps}"); tracing::info!("🔁 Attaching to pageserver {}", dest_ps_id);
self.location_config(&dest_ps, dest_conf, None, false) self.location_config(dest_ps_id, dest_conf, None).await?;
.await?;
if let Some(baseline) = baseline_lsns { if let Some(baseline) = baseline_lsns {
tracing::info!("🕑 Waiting for LSN to catch up..."); tracing::info!("🕑 Waiting for LSN to catch up...");
self.await_lsn(self.tenant_shard_id, &dest_ps, baseline) self.await_lsn(self.tenant_shard_id, &dest_ps_id, baseline)
.await?; .await?;
} }
tracing::info!("🔁 Notifying compute to use pageserver {dest_ps}"); tracing::info!("🔁 Notifying compute to use pageserver {}", dest_ps_id);
self.compute_hook
.notify(self.tenant_shard_id, dest_ps_id)
.await?;
// During a live migration it is unhelpful to proceed if we couldn't notify compute: if we detach // Downgrade the origin to secondary. If the tenant's policy is PlacementPolicy::Single, then
// the origin without notifying compute, we will render the tenant unavailable.
while let Err(e) = self.compute_notify().await {
match e {
NotifyError::Fatal(_) => return Err(ReconcileError::Notify(e)),
_ => {
tracing::warn!(
"Live migration blocked by compute notification error, retrying: {e}"
);
}
}
}
// Downgrade the origin to secondary. If the tenant's policy is PlacementPolicy::Attached(0), then
// this location will be deleted in the general case reconciliation that runs after this. // this location will be deleted in the general case reconciliation that runs after this.
let origin_secondary_conf = build_location_config( let origin_secondary_conf = build_location_config(
&self.shard, &self.shard,
@@ -504,93 +330,39 @@ impl Reconciler {
None, None,
Some(LocationConfigSecondary { warm: true }), Some(LocationConfigSecondary { warm: true }),
); );
self.location_config(&origin_ps, origin_secondary_conf.clone(), None, false) self.location_config(origin_ps_id, origin_secondary_conf.clone(), None)
.await?; .await?;
// TODO: we should also be setting the ObservedState on earlier API calls, in case we fail // TODO: we should also be setting the ObservedState on earlier API calls, in case we fail
// partway through. In fact, all location conf API calls should be in a wrapper that sets // partway through. In fact, all location conf API calls should be in a wrapper that sets
// the observed state to None, then runs, then sets it to what we wrote. // the observed state to None, then runs, then sets it to what we wrote.
self.observed.locations.insert( self.observed.locations.insert(
origin_ps.get_id(), origin_ps_id,
ObservedStateLocation { ObservedStateLocation {
conf: Some(origin_secondary_conf), conf: Some(origin_secondary_conf),
}, },
); );
tracing::info!("🔁 Switching to AttachedSingle mode on node {dest_ps}",); println!(
"🔁 Switching to AttachedSingle mode on pageserver {}",
dest_ps_id
);
let dest_final_conf = build_location_config( let dest_final_conf = build_location_config(
&self.shard, &self.shard,
&self.config, &self.config,
LocationConfigMode::AttachedSingle, LocationConfigMode::AttachedSingle,
self.generation, Some(self.generation),
None, None,
); );
self.location_config(&dest_ps, dest_final_conf.clone(), None, false) self.location_config(dest_ps_id, dest_final_conf.clone(), None)
.await?; .await?;
self.observed.locations.insert( self.observed.locations.insert(
dest_ps.get_id(), dest_ps_id,
ObservedStateLocation { ObservedStateLocation {
conf: Some(dest_final_conf), conf: Some(dest_final_conf),
}, },
); );
tracing::info!("✅ Migration complete"); println!("✅ Migration complete");
Ok(())
}
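
The migration above captures baseline LSNs on the origin and then polls the destination until every timeline has caught up. A standalone sketch of just the catch-up check, with `TimelineId` and `Lsn` reduced to plain integers:

```rust
use std::collections::HashMap;

type TimelineId = u64;
type Lsn = u64;

/// Returns true once the destination has reached or passed the origin's
/// baseline LSN on every timeline that existed at the start of the migration.
fn caught_up(baseline: &HashMap<TimelineId, Lsn>, latest: &HashMap<TimelineId, Lsn>) -> bool {
    baseline.iter().all(|(timeline, baseline_lsn)| {
        match latest.get(timeline) {
            Some(latest_lsn) => latest_lsn >= baseline_lsn,
            // A timeline visible on the origin but not yet on the destination
            // counts as "behind": keep waiting.
            None => false,
        }
    })
}

fn main() {
    let baseline = HashMap::from([(1, 0x100), (2, 0x200)]);
    let behind = HashMap::from([(1, 0x100), (2, 0x1f0)]);
    let ready = HashMap::from([(1, 0x120), (2, 0x210), (3, 0x10)]);
    assert!(!caught_up(&baseline, &behind));
    assert!(caught_up(&baseline, &ready));
    println!("LSN catch-up check behaves as expected");
}
```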
async fn maybe_refresh_observed(&mut self) -> Result<(), ReconcileError> {
// If the attached node has uncertain state, read it from the pageserver before proceeding: this
// is important to avoid spurious generation increments.
//
// We don't need to do this for secondary/detach locations because it's harmless to just PUT their
// location conf, whereas for attached locations it can interrupt clients if we spuriously destroy/recreate
// the `Timeline` object in the pageserver.
let Some(attached_node) = self.intent.attached.as_ref() else {
// Nothing to do
return Ok(());
};
if matches!(
self.observed.locations.get(&attached_node.get_id()),
Some(ObservedStateLocation { conf: None })
) {
let tenant_shard_id = self.tenant_shard_id;
let observed_conf = match attached_node
.with_client_retries(
|client| async move { client.get_location_config(tenant_shard_id).await },
&self.service_config.jwt_token,
1,
1,
Duration::from_secs(5),
&self.cancel,
)
.await
{
Some(Ok(observed)) => Some(observed),
Some(Err(mgmt_api::Error::ApiError(status, _msg)))
if status == StatusCode::NOT_FOUND =>
{
None
}
Some(Err(e)) => return Err(e.into()),
None => return Err(ReconcileError::Cancel),
};
tracing::info!("Scanned location configuration on {attached_node}: {observed_conf:?}");
match observed_conf {
Some(conf) => {
// Pageserver returned a state: update it in observed. This may still be an indeterminate (None) state,
// if internally the pageserver's TenantSlot was being mutated (e.g. some long running API call is still running)
self.observed
.locations
.insert(attached_node.get_id(), ObservedStateLocation { conf });
}
None => {
// Pageserver returned 404: we have confirmation that there is no state for this shard on that pageserver.
self.observed.locations.remove(&attached_node.get_id());
}
}
}
Ok(()) Ok(())
} }
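
`maybe_refresh_observed` only probes the would-be attached node when its observed entry is "present but unknown", and treats a 404 as positive confirmation that no state exists there. A sketch of that decision, with a stubbed probe and placeholder types standing in for the real management API:

```rust
use std::collections::HashMap;

#[allow(dead_code)]
#[derive(Clone, Debug)]
struct LocationConfig; // placeholder for the real per-location config

/// Outcome of asking a node for its current location config.
#[allow(dead_code)]
enum Probe {
    Found(Option<LocationConfig>), // node answered; None = slot currently indeterminate
    NotFound,                      // 404: the node definitely has no state for this shard
    Error(String),                 // transient failure: leave the observed entry untouched
}

// Placeholder for the real GET against the pageserver's location config endpoint.
fn probe_node(_node: u64) -> Probe {
    Probe::NotFound
}

fn refresh_observed(
    observed: &mut HashMap<u64, Option<LocationConfig>>,
    node: u64,
) -> Result<(), String> {
    // Only probe when the entry is "present but unknown": refreshing a known
    // config would be wasted work, and an absent entry needs nothing.
    if !matches!(observed.get(&node), Some(None)) {
        return Ok(());
    }
    match probe_node(node) {
        Probe::Found(conf) => {
            // May still be None if the node was mid-mutation; record what it said.
            observed.insert(node, conf);
        }
        Probe::NotFound => {
            // Positive confirmation that there is no state on this node.
            observed.remove(&node);
        }
        Probe::Error(e) => return Err(e),
    }
    Ok(())
}

fn main() {
    let mut observed = HashMap::from([(1u64, None::<LocationConfig>)]);
    refresh_observed(&mut observed, 1).unwrap();
    assert!(observed.is_empty());
}
```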
@@ -602,86 +374,41 @@ impl Reconciler {
/// general case reconciliation where we walk through the intent by pageserver /// general case reconciliation where we walk through the intent by pageserver
/// and call out to the pageserver to apply the desired state. /// and call out to the pageserver to apply the desired state.
pub(crate) async fn reconcile(&mut self) -> Result<(), ReconcileError> { pub(crate) async fn reconcile(&mut self) -> Result<(), ReconcileError> {
// Prepare: if we have uncertain `observed` state for our would-be attachment location, then refresh it // TODO: if any of self.observed is None, call to remote pageservers
self.maybe_refresh_observed().await?; // to learn correct state.
// Special case: live migration // Special case: live migration
self.maybe_live_migrate().await?; self.maybe_live_migrate().await?;
// If the attached pageserver is not attached, do so now. // If the attached pageserver is not attached, do so now.
if let Some(node) = self.intent.attached.as_ref() { if let Some(node_id) = self.intent.attached {
// If we are in an attached policy, then generation must have been set (null generations let mut wanted_conf =
// are only present when a tenant is initially loaded with a secondary policy) attached_location_conf(self.generation, &self.shard, &self.config);
debug_assert!(self.generation.is_some()); match self.observed.locations.get(&node_id) {
let Some(generation) = self.generation else {
return Err(ReconcileError::Other(anyhow::anyhow!(
"Attempted to attach with NULL generation"
)));
};
let mut wanted_conf = attached_location_conf(
generation,
&self.shard,
&self.config,
!self.intent.secondary.is_empty(),
);
match self.observed.locations.get(&node.get_id()) {
Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {
// Nothing to do // Nothing to do
tracing::info!(node_id=%node.get_id(), "Observed configuration already correct.") tracing::info!("Observed configuration already correct.")
} }
observed => { _ => {
// In all cases other than a matching observed configuration, we will // In all cases other than a matching observed configuration, we will
// reconcile this location. This includes locations with different configurations, as well // reconcile this location. This includes locations with different configurations, as well
// as locations with unknown (None) observed state. // as locations with unknown (None) observed state.
self.generation = self
// The general case is to increment the generation. However, there are cases .persistence
// where this is not necessary: .increment_generation(self.tenant_shard_id, node_id)
// - if we are only updating the TenantConf part of the location .await?;
// - if we are only changing the attachment mode (e.g. going to attachedmulti or attachedstale) wanted_conf.generation = self.generation.into();
// and the location was already in the correct generation tracing::info!("Observed configuration requires update.");
let increment_generation = match observed { self.location_config(node_id, wanted_conf, None).await?;
None => true, if let Err(e) = self
Some(ObservedStateLocation { conf: None }) => true, .compute_hook
Some(ObservedStateLocation { .notify(self.tenant_shard_id, node_id)
conf: Some(observed), .await
}) => { {
let generations_match = observed.generation == wanted_conf.generation; tracing::warn!(
"Failed to notify compute of newly attached pageserver {node_id}: {e}"
use LocationConfigMode::*; );
let mode_transition_requires_gen_inc =
match (observed.mode, wanted_conf.mode) {
// Usually the short-lived attachment modes (multi and stale) are only used
// in the case of [`Self::live_migrate`], but it is simple to handle them correctly
// here too. Locations are allowed to go Single->Stale and Multi->Single within the same generation.
(AttachedSingle, AttachedStale) => false,
(AttachedMulti, AttachedSingle) => false,
(lhs, rhs) => lhs != rhs,
};
!generations_match || mode_transition_requires_gen_inc
}
};
if increment_generation {
let generation = self
.persistence
.increment_generation(self.tenant_shard_id, node.get_id())
.await?;
self.generation = Some(generation);
wanted_conf.generation = generation.into();
} }
tracing::info!(node_id=%node.get_id(), "Observed configuration requires update.");
// Because `node` comes from a ref to &self, clone it before calling into a &mut self
// function: this could be avoided by refactoring the state mutated by location_config into
// a separate type to Self.
let node = node.clone();
// Use lazy=true, because we may run many of Self concurrently, and do not want to
// overload the pageserver with logical size calculations.
self.location_config(&node, wanted_conf, None, true).await?;
self.compute_notify().await?;
} }
} }
} }
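
The attached-location branch above only increments the generation when it has to: re-applying an identical config, or the Single-to-Stale and Multi-to-Single transitions used during live migration, stay within the current generation. A sketch of that decision follows; both "no observed entry" and "entry with unknown config" are collapsed into `None`, and the types are simplified stand-ins.

```rust
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Mode { AttachedSingle, AttachedMulti, AttachedStale }

#[derive(Clone, Debug)]
struct LocationConfig { mode: Mode, generation: Option<u32> }

/// Decide whether attaching with `wanted` on a node whose last-known state is
/// `observed` requires bumping the tenant's generation number.
fn needs_generation_increment(observed: Option<&LocationConfig>, wanted: &LocationConfig) -> bool {
    use Mode::*;
    match observed {
        // Unknown or absent remote state: always increment, so any stale
        // attachment elsewhere is fenced off.
        None => true,
        Some(observed) => {
            let generations_match = observed.generation == wanted.generation;
            // Within a single generation a location may go Single -> Stale and
            // Multi -> Single (the live-migration dance); any other mode change
            // forces an increment.
            let mode_change_requires_increment = match (observed.mode, wanted.mode) {
                (AttachedSingle, AttachedStale) => false,
                (AttachedMulti, AttachedSingle) => false,
                (lhs, rhs) => lhs != rhs,
            };
            !generations_match || mode_change_requires_increment
        }
    }
}

fn main() {
    let stale = LocationConfig { mode: Mode::AttachedStale, generation: Some(7) };
    let single = LocationConfig { mode: Mode::AttachedSingle, generation: Some(7) };
    // Stale -> Single is a mode change that needs a fresh generation...
    assert!(needs_generation_increment(Some(&stale), &single));
    // ...whereas re-applying an identical attached config does not.
    assert!(!needs_generation_increment(Some(&single), &single));
}
```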
@@ -689,107 +416,66 @@ impl Reconciler {
// Configure secondary locations: if these were previously attached this // Configure secondary locations: if these were previously attached this
// implicitly downgrades them from attached to secondary. // implicitly downgrades them from attached to secondary.
let mut changes = Vec::new(); let mut changes = Vec::new();
for node in &self.intent.secondary { for node_id in &self.intent.secondary {
let wanted_conf = secondary_location_conf(&self.shard, &self.config); let wanted_conf = secondary_location_conf(&self.shard, &self.config);
match self.observed.locations.get(&node.get_id()) { match self.observed.locations.get(node_id) {
Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {
// Nothing to do // Nothing to do
tracing::info!(node_id=%node.get_id(), "Observed configuration already correct.") tracing::info!(%node_id, "Observed configuration already correct.")
} }
_ => { _ => {
// In all cases other than a matching observed configuration, we will // In all cases other than a matching observed configuration, we will
// reconcile this location. // reconcile this location.
tracing::info!(node_id=%node.get_id(), "Observed configuration requires update."); tracing::info!(%node_id, "Observed configuration requires update.");
changes.push((node.clone(), wanted_conf)) changes.push((*node_id, wanted_conf))
} }
} }
} }
// Detach any extraneous pageservers that are no longer referenced // Detach any extraneous pageservers that are no longer referenced
// by our intent. // by our intent.
for node in &self.detach { let all_pageservers = self.intent.all_pageservers();
for node_id in self.observed.locations.keys() {
if all_pageservers.contains(node_id) {
// We are only detaching pageservers that aren't used at all.
continue;
}
changes.push(( changes.push((
node.clone(), *node_id,
LocationConfig { LocationConfig {
mode: LocationConfigMode::Detached, mode: LocationConfigMode::Detached,
generation: None, generation: None,
secondary_conf: None, secondary_conf: None,
shard_number: self.shard.number.0, shard_number: self.shard.number.0,
shard_count: self.shard.count.literal(), shard_count: self.shard.count.0,
shard_stripe_size: self.shard.stripe_size.0, shard_stripe_size: self.shard.stripe_size.0,
tenant_conf: self.config.clone(), tenant_conf: self.config.clone(),
}, },
)); ));
} }
for (node, conf) in changes { for (node_id, conf) in changes {
if self.cancel.is_cancelled() { self.location_config(node_id, conf, None).await?;
return Err(ReconcileError::Cancel);
}
self.location_config(&node, conf, None, false).await?;
} }
Ok(()) Ok(())
} }
pub(crate) async fn compute_notify(&mut self) -> Result<(), NotifyError> {
// Whenever a particular Reconciler emits a notification, it is always notifying for the intended
// destination.
if let Some(node) = &self.intent.attached {
let result = self
.compute_hook
.notify(
self.tenant_shard_id,
node.get_id(),
self.shard.stripe_size,
&self.cancel,
)
.await;
if let Err(e) = &result {
// It is up to the caller whether they want to drop out on this error, but they don't have to:
// in general we should avoid letting unavailability of the cloud control plane stop us from
// making progress.
tracing::warn!("Failed to notify compute of attached pageserver {node}: {e}");
// Set this flag so that in our ReconcileResult we will set the flag on the shard that it
// needs to retry at some point.
self.compute_notify_failure = true;
}
result
} else {
Ok(())
}
}
}
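
`compute_notify` surfaces the error to its caller but also latches a failure flag, so the shard is retried later even if the caller chooses to proceed. A minimal sketch of that pattern with a stubbed notification hook (names and types here are illustrative, not the real API):

```rust
#[derive(Debug)]
#[allow(dead_code)]
enum NotifyError { Unavailable, Fatal(String) }

// Placeholder for the real hook that tells compute which pageserver to use.
fn notify_compute(_tenant: &str, _node: u64) -> Result<(), NotifyError> {
    Err(NotifyError::Unavailable)
}

/// Carries the "we still owe the control plane a notification" flag between
/// reconcile runs.
struct Notifier {
    compute_notify_failure: bool,
}

impl Notifier {
    fn compute_notify(&mut self, tenant: &str, node: u64) -> Result<(), NotifyError> {
        let result = notify_compute(tenant, node);
        if let Err(e) = &result {
            // Don't let an unavailable control plane wedge reconciliation:
            // remember the failure so the shard gets a retry later.
            eprintln!("failed to notify compute about {tenant} on node {node}: {e:?}");
            self.compute_notify_failure = true;
        }
        result
    }
}

fn main() {
    let mut notifier = Notifier { compute_notify_failure: false };
    let _ = notifier.compute_notify("tenant-abc", 1);
    assert!(notifier.compute_notify_failure);
}
```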
/// We tweak the externally-set TenantConfig while configuring
/// locations, using our awareness of whether secondary locations
/// are in use to automatically enable/disable heatmap uploads.
fn ha_aware_config(config: &TenantConfig, has_secondaries: bool) -> TenantConfig {
let mut config = config.clone();
if has_secondaries {
if config.heatmap_period.is_none() {
config.heatmap_period = Some(DEFAULT_HEATMAP_PERIOD.to_string());
}
} else {
config.heatmap_period = None;
}
config
} }
pub(crate) fn attached_location_conf( pub(crate) fn attached_location_conf(
generation: Generation, generation: Generation,
shard: &ShardIdentity, shard: &ShardIdentity,
config: &TenantConfig, config: &TenantConfig,
has_secondaries: bool,
) -> LocationConfig { ) -> LocationConfig {
LocationConfig { LocationConfig {
mode: LocationConfigMode::AttachedSingle, mode: LocationConfigMode::AttachedSingle,
generation: generation.into(), generation: generation.into(),
secondary_conf: None, secondary_conf: None,
shard_number: shard.number.0, shard_number: shard.number.0,
shard_count: shard.count.literal(), shard_count: shard.count.0,
shard_stripe_size: shard.stripe_size.0, shard_stripe_size: shard.stripe_size.0,
tenant_conf: ha_aware_config(config, has_secondaries), tenant_conf: config.clone(),
} }
} }
@@ -802,8 +488,8 @@ pub(crate) fn secondary_location_conf(
generation: None, generation: None,
secondary_conf: Some(LocationConfigSecondary { warm: true }), secondary_conf: Some(LocationConfigSecondary { warm: true }),
shard_number: shard.number.0, shard_number: shard.number.0,
shard_count: shard.count.literal(), shard_count: shard.count.0,
shard_stripe_size: shard.stripe_size.0, shard_stripe_size: shard.stripe_size.0,
tenant_conf: ha_aware_config(config, true), tenant_conf: config.clone(),
} }
} }


@@ -1,9 +1,9 @@
use crate::{node::Node, tenant_state::TenantState}; use pageserver_api::shard::TenantShardId;
use pageserver_api::controller_api::UtilizationScore; use std::collections::{BTreeMap, HashMap};
use serde::Serialize;
use std::collections::HashMap;
use utils::{http::error::ApiError, id::NodeId}; use utils::{http::error::ApiError, id::NodeId};
use crate::{node::Node, tenant_state::TenantState};
/// Scenarios in which we cannot find a suitable location for a tenant shard /// Scenarios in which we cannot find a suitable location for a tenant shard
#[derive(thiserror::Error, Debug)] #[derive(thiserror::Error, Debug)]
pub enum ScheduleError { pub enum ScheduleError {
@@ -19,224 +19,52 @@ impl From<ScheduleError> for ApiError {
} }
} }
#[derive(Serialize, Eq, PartialEq)]
pub enum MaySchedule {
Yes(UtilizationScore),
No,
}
#[derive(Serialize)]
struct SchedulerNode {
/// How many shards are currently scheduled on this node, via their [`crate::tenant_state::IntentState`].
shard_count: usize,
/// Whether this node is currently eligible to have new shards scheduled (this is derived
/// from a node's availability state and scheduling policy).
may_schedule: MaySchedule,
}
impl PartialEq for SchedulerNode {
fn eq(&self, other: &Self) -> bool {
let may_schedule_matches = matches!(
(&self.may_schedule, &other.may_schedule),
(MaySchedule::Yes(_), MaySchedule::Yes(_)) | (MaySchedule::No, MaySchedule::No)
);
may_schedule_matches && self.shard_count == other.shard_count
}
}
impl Eq for SchedulerNode {}
/// This type is responsible for selecting which node is used when a tenant shard needs to choose a pageserver
/// on which to run.
///
/// The type has no persistent state of its own: this is all populated at startup. The Serialize
/// impl is only for debug dumps.
#[derive(Serialize)]
pub(crate) struct Scheduler { pub(crate) struct Scheduler {
nodes: HashMap<NodeId, SchedulerNode>, tenant_counts: HashMap<NodeId, usize>,
} }
impl Scheduler { impl Scheduler {
pub(crate) fn new<'a>(nodes: impl Iterator<Item = &'a Node>) -> Self { pub(crate) fn new(
let mut scheduler_nodes = HashMap::new(); tenants: &BTreeMap<TenantShardId, TenantState>,
for node in nodes { nodes: &HashMap<NodeId, Node>,
scheduler_nodes.insert( ) -> Self {
node.get_id(), let mut tenant_counts = HashMap::new();
SchedulerNode { for node_id in nodes.keys() {
shard_count: 0, tenant_counts.insert(*node_id, 0);
may_schedule: node.may_schedule(),
},
);
} }
Self { for tenant in tenants.values() {
nodes: scheduler_nodes, if let Some(ps) = tenant.intent.attached {
} let entry = tenant_counts.entry(ps).or_insert(0);
} *entry += 1;
/// For debug/support: check that our internal statistics are in sync with the state of
/// the nodes & tenant shards.
///
/// If anything is inconsistent, log details and return an error.
pub(crate) fn consistency_check<'a>(
&self,
nodes: impl Iterator<Item = &'a Node>,
shards: impl Iterator<Item = &'a TenantState>,
) -> anyhow::Result<()> {
let mut expect_nodes: HashMap<NodeId, SchedulerNode> = HashMap::new();
for node in nodes {
expect_nodes.insert(
node.get_id(),
SchedulerNode {
shard_count: 0,
may_schedule: node.may_schedule(),
},
);
}
for shard in shards {
if let Some(node_id) = shard.intent.get_attached() {
match expect_nodes.get_mut(node_id) {
Some(node) => node.shard_count += 1,
None => anyhow::bail!(
"Tenant {} references nonexistent node {}",
shard.tenant_shard_id,
node_id
),
}
}
for node_id in shard.intent.get_secondary() {
match expect_nodes.get_mut(node_id) {
Some(node) => node.shard_count += 1,
None => anyhow::bail!(
"Tenant {} references nonexistent node {}",
shard.tenant_shard_id,
node_id
),
}
} }
} }
for (node_id, expect_node) in &expect_nodes { for (node_id, node) in nodes {
let Some(self_node) = self.nodes.get(node_id) else { if !node.may_schedule() {
anyhow::bail!("Node {node_id} not found in Self") tenant_counts.remove(node_id);
};
if self_node != expect_node {
tracing::error!("Inconsistency detected in scheduling state for node {node_id}");
tracing::error!("Expected state: {}", serde_json::to_string(expect_node)?);
tracing::error!("Self state: {}", serde_json::to_string(self_node)?);
anyhow::bail!("Inconsistent state on {node_id}");
} }
} }
if expect_nodes.len() != self.nodes.len() { Self { tenant_counts }
// We just checked that all the expected nodes are present. If the lengths don't match,
// it means that we have nodes in Self that are unexpected.
for node_id in self.nodes.keys() {
if !expect_nodes.contains_key(node_id) {
anyhow::bail!("Node {node_id} found in Self but not in expected nodes");
}
}
}
Ok(())
} }
/// Increment the reference count of a node. This reference count is used to guide scheduling pub(crate) fn schedule_shard(
/// decisions, not for memory management: it represents one tenant shard whose IntentState targets &mut self,
/// this node. hard_exclude: &[NodeId],
/// ) -> Result<NodeId, ScheduleError> {
/// It is an error to call this for a node that is not known to the scheduler (i.e. passed into if self.tenant_counts.is_empty() {
/// [`Self::new`] or [`Self::node_upsert`])
pub(crate) fn node_inc_ref(&mut self, node_id: NodeId) {
let Some(node) = self.nodes.get_mut(&node_id) else {
tracing::error!("Scheduler missing node {node_id}");
debug_assert!(false);
return;
};
node.shard_count += 1;
}
/// Decrement a node's reference count. Inverse of [`Self::node_inc_ref`].
pub(crate) fn node_dec_ref(&mut self, node_id: NodeId) {
let Some(node) = self.nodes.get_mut(&node_id) else {
debug_assert!(false);
tracing::error!("Scheduler missing node {node_id}");
return;
};
node.shard_count -= 1;
}
pub(crate) fn node_upsert(&mut self, node: &Node) {
use std::collections::hash_map::Entry::*;
match self.nodes.entry(node.get_id()) {
Occupied(mut entry) => {
entry.get_mut().may_schedule = node.may_schedule();
}
Vacant(entry) => {
entry.insert(SchedulerNode {
shard_count: 0,
may_schedule: node.may_schedule(),
});
}
}
}
pub(crate) fn node_remove(&mut self, node_id: NodeId) {
if self.nodes.remove(&node_id).is_none() {
tracing::warn!(node_id=%node_id, "Removed non-existent node from scheduler");
}
}
/// Where we have several nodes to choose from, for example when picking a secondary location
/// to promote to an attached location, this method may be used to pick the best choice based
/// on the scheduler's knowledge of utilization and availability.
///
/// If the input is empty, or all the nodes are not eligible for scheduling, return None: the
/// caller can pick a node some other way.
pub(crate) fn node_preferred(&self, nodes: &[NodeId]) -> Option<NodeId> {
if nodes.is_empty() {
return None;
}
// TODO: When the utilization score returned by the pageserver becomes meaningful,
// schedule based on that instead of the shard count.
let node = nodes
.iter()
.map(|node_id| {
let may_schedule = self
.nodes
.get(node_id)
.map(|n| n.may_schedule != MaySchedule::No)
.unwrap_or(false);
(*node_id, may_schedule)
})
.max_by_key(|(_n, may_schedule)| *may_schedule);
// If even the preferred node has may_schedule==false, return None
node.and_then(|(node_id, may_schedule)| if may_schedule { Some(node_id) } else { None })
}
pub(crate) fn schedule_shard(&self, hard_exclude: &[NodeId]) -> Result<NodeId, ScheduleError> {
if self.nodes.is_empty() {
return Err(ScheduleError::NoPageservers); return Err(ScheduleError::NoPageservers);
} }
let mut tenant_counts: Vec<(NodeId, usize)> = self let mut tenant_counts: Vec<(NodeId, usize)> = self
.nodes .tenant_counts
.iter() .iter()
.filter_map(|(k, v)| { .filter_map(|(k, v)| {
if hard_exclude.contains(k) || v.may_schedule == MaySchedule::No { if hard_exclude.contains(k) {
None None
} else { } else {
Some((*k, v.shard_count)) Some((*k, *v))
} }
}) })
.collect(); .collect();
@@ -245,108 +73,17 @@ impl Scheduler {
tenant_counts.sort_by_key(|i| (i.1, i.0)); tenant_counts.sort_by_key(|i| (i.1, i.0));
if tenant_counts.is_empty() { if tenant_counts.is_empty() {
// After applying constraints, no pageservers were left. We log some detail about // After applying constraints, no pageservers were left
// the state of nodes to help understand why this happened. This is not logged as an error because
// it is legitimately possible for enough nodes to be Offline to prevent scheduling a shard.
tracing::info!("Scheduling failure, while excluding {hard_exclude:?}, node states:");
for (node_id, node) in &self.nodes {
tracing::info!(
"Node {node_id}: may_schedule={} shards={}",
node.may_schedule != MaySchedule::No,
node.shard_count
);
}
return Err(ScheduleError::ImpossibleConstraint); return Err(ScheduleError::ImpossibleConstraint);
} }
for (node_id, count) in &tenant_counts {
tracing::info!("tenant_counts[{node_id}]={count}");
}
let node_id = tenant_counts.first().unwrap().0; let node_id = tenant_counts.first().unwrap().0;
tracing::info!( tracing::info!("scheduler selected node {node_id}");
"scheduler selected node {node_id} (elegible nodes {:?}, exclude: {hard_exclude:?})", *self.tenant_counts.get_mut(&node_id).unwrap() += 1;
tenant_counts.iter().map(|i| i.0 .0).collect::<Vec<_>>()
);
// Note that we do not update shard count here to reflect the scheduling: that
// is IntentState's job when the scheduled location is used.
Ok(node_id) Ok(node_id)
} }
} }
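
`schedule_shard` picks the least-loaded node after filtering out excluded and unschedulable ones, breaking ties on node id so the choice is deterministic. A standalone sketch of that selection, with shard counts and a schedulability predicate passed in directly instead of the scheduler's internal node map:

```rust
use std::collections::HashMap;

type NodeId = u64;

#[derive(Debug, PartialEq)]
enum ScheduleError { NoPageservers, ImpossibleConstraint }

/// Pick the eligible node with the fewest shards; ties break on the lower node
/// id so the choice is deterministic. `exclude` holds nodes the shard already uses.
fn schedule_shard(
    shard_counts: &HashMap<NodeId, usize>,
    schedulable: impl Fn(NodeId) -> bool,
    exclude: &[NodeId],
) -> Result<NodeId, ScheduleError> {
    if shard_counts.is_empty() {
        return Err(ScheduleError::NoPageservers);
    }
    let mut candidates: Vec<(NodeId, usize)> = shard_counts
        .iter()
        .map(|(id, count)| (*id, *count))
        .filter(|(id, _)| !exclude.contains(id) && schedulable(*id))
        .collect();
    // Least-loaded first, node id as the tie-breaker.
    candidates.sort_by_key(|(id, count)| (*count, *id));
    candidates
        .first()
        .map(|(id, _)| *id)
        .ok_or(ScheduleError::ImpossibleConstraint)
}

fn main() {
    let counts = HashMap::from([(1u64, 3usize), (2, 1), (3, 1)]);
    // Nodes 2 and 3 are tied on load; the lower id wins.
    assert_eq!(schedule_shard(&counts, |_| true, &[]), Ok(2));
    // Excluding node 2 pushes the shard to node 3.
    assert_eq!(schedule_shard(&counts, |_| true, &[2]), Ok(3));
}
```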
#[cfg(test)]
pub(crate) mod test_utils {
use crate::node::Node;
use pageserver_api::controller_api::{NodeAvailability, UtilizationScore};
use std::collections::HashMap;
use utils::id::NodeId;
/// Test helper: synthesize the requested number of nodes, all in active state.
///
/// Node IDs start at one.
pub(crate) fn make_test_nodes(n: u64) -> HashMap<NodeId, Node> {
(1..n + 1)
.map(|i| {
(NodeId(i), {
let mut node = Node::new(
NodeId(i),
format!("httphost-{i}"),
80 + i as u16,
format!("pghost-{i}"),
5432 + i as u16,
);
node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
assert!(node.is_available());
node
})
})
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tenant_state::IntentState;
#[test]
fn scheduler_basic() -> anyhow::Result<()> {
let nodes = test_utils::make_test_nodes(2);
let mut scheduler = Scheduler::new(nodes.values());
let mut t1_intent = IntentState::new();
let mut t2_intent = IntentState::new();
let scheduled = scheduler.schedule_shard(&[])?;
t1_intent.set_attached(&mut scheduler, Some(scheduled));
let scheduled = scheduler.schedule_shard(&[])?;
t2_intent.set_attached(&mut scheduler, Some(scheduled));
assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1);
assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 1);
let scheduled = scheduler.schedule_shard(&t1_intent.all_pageservers())?;
t1_intent.push_secondary(&mut scheduler, scheduled);
assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1);
assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 2);
t1_intent.clear(&mut scheduler);
assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 0);
assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 1);
if cfg!(debug_assertions) {
// Dropping an IntentState without clearing it causes a panic in debug mode,
// because we have failed to properly update scheduler shard counts.
let result = std::panic::catch_unwind(move || {
drop(t2_intent);
});
assert!(result.is_err());
} else {
t2_intent.clear(&mut scheduler);
assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 0);
assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 0);
}
Ok(())
}
}


@@ -17,10 +17,9 @@ diesel::table! {
shard_number -> Int4, shard_number -> Int4,
shard_count -> Int4, shard_count -> Int4,
shard_stripe_size -> Int4, shard_stripe_size -> Int4,
generation -> Nullable<Int4>, generation -> Int4,
generation_pageserver -> Nullable<Int8>, generation_pageserver -> Int8,
placement_policy -> Varchar, placement_policy -> Varchar,
splitting -> Int2,
config -> Text, config -> Text,
} }
} }

File diff suppressed because it is too large


@@ -1,54 +1,27 @@
use std::{ use std::{collections::HashMap, sync::Arc, time::Duration};
collections::{HashMap, HashSet},
sync::Arc,
time::Duration,
};
use crate::{ use control_plane::attachment_service::NodeAvailability;
metrics::{self, ReconcileCompleteLabelGroup, ReconcileOutcome},
persistence::TenantShardPersistence,
};
use pageserver_api::controller_api::PlacementPolicy;
use pageserver_api::{ use pageserver_api::{
models::{LocationConfig, LocationConfigMode, TenantConfig}, models::{LocationConfig, LocationConfigMode, TenantConfig},
shard::{ShardIdentity, TenantShardId}, shard::{ShardIdentity, TenantShardId},
}; };
use serde::Serialize;
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
use tracing::{instrument, Instrument};
use utils::{ use utils::{
generation::Generation, generation::Generation,
id::NodeId, id::NodeId,
seqwait::{SeqWait, SeqWaitError}, seqwait::{SeqWait, SeqWaitError},
sync::gate::Gate,
}; };
use crate::{ use crate::{
compute_hook::ComputeHook, compute_hook::ComputeHook,
node::Node, node::Node,
persistence::{split_state::SplitState, Persistence}, persistence::Persistence,
reconciler::{ reconciler::{attached_location_conf, secondary_location_conf, ReconcileError, Reconciler},
attached_location_conf, secondary_location_conf, ReconcileError, Reconciler, TargetState,
},
scheduler::{ScheduleError, Scheduler}, scheduler::{ScheduleError, Scheduler},
service, Sequence, service, PlacementPolicy, Sequence,
}; };
/// Serialization helper
fn read_mutex_content<S, T>(v: &std::sync::Mutex<T>, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
T: Clone + std::fmt::Display,
{
serializer.collect_str(&v.lock().unwrap())
}
/// In-memory state for a particular tenant shard.
///
/// This struct implements Serialize for debugging purposes, but is _not_ persisted
/// itself: see [`crate::persistence`] for the subset of tenant shard state that is persisted.
#[derive(Serialize)]
pub(crate) struct TenantState { pub(crate) struct TenantState {
pub(crate) tenant_shard_id: TenantShardId, pub(crate) tenant_shard_id: TenantShardId,
@@ -60,11 +33,8 @@ pub(crate) struct TenantState {
pub(crate) sequence: Sequence, pub(crate) sequence: Sequence,
// Latest generation number: next time we attach, increment this // Latest generation number: next time we attach, increment this
// and use the incremented number when attaching. // and use the incremented number when attaching
// pub(crate) generation: Generation,
// None represents an incompletely onboarded tenant via the [`Service::location_config`]
// API, where this tenant may only run in PlacementPolicy::Secondary.
pub(crate) generation: Option<Generation>,
// High level description of how the tenant should be set up. Provided // High level description of how the tenant should be set up. Provided
// externally. // externally.
@@ -86,172 +56,30 @@ pub(crate) struct TenantState {
/// If a reconcile task is currently in flight, it may be joined here (it is /// If a reconcile task is currently in flight, it may be joined here (it is
/// only safe to join if either the result has been received or the reconciler's /// only safe to join if either the result has been received or the reconciler's
/// cancellation token has been fired) /// cancellation token has been fired)
#[serde(skip)]
pub(crate) reconciler: Option<ReconcilerHandle>, pub(crate) reconciler: Option<ReconcilerHandle>,
/// If a tenant is being split, then all shards with that TenantId will have a
/// SplitState set, this acts as a guard against other operations such as background
/// reconciliation, and timeline creation.
pub(crate) splitting: SplitState,
/// Optionally wait for reconciliation to complete up to a particular /// Optionally wait for reconciliation to complete up to a particular
/// sequence number. /// sequence number.
#[serde(skip)]
pub(crate) waiter: std::sync::Arc<SeqWait<Sequence, Sequence>>, pub(crate) waiter: std::sync::Arc<SeqWait<Sequence, Sequence>>,
/// Indicates sequence number for which we have encountered an error reconciling. If /// Indicates sequence number for which we have encountered an error reconciling. If
/// this advances ahead of [`Self::waiter`] then a reconciliation error has occurred, /// this advances ahead of [`Self::waiter`] then a reconciliation error has occurred,
/// and callers should stop waiting for `waiter` and propagate the error. /// and callers should stop waiting for `waiter` and propagate the error.
#[serde(skip)]
pub(crate) error_waiter: std::sync::Arc<SeqWait<Sequence, Sequence>>, pub(crate) error_waiter: std::sync::Arc<SeqWait<Sequence, Sequence>>,
/// The most recent error from a reconcile on this tenant /// The most recent error from a reconcile on this tenant
/// TODO: generalize to an array of recent events /// TODO: generalize to an array of recent events
/// TODO: use an ArcSwap instead of a mutex for faster reads? /// TODO: use an ArcSwap instead of a mutex for faster reads?
#[serde(serialize_with = "read_mutex_content")]
pub(crate) last_error: std::sync::Arc<std::sync::Mutex<String>>, pub(crate) last_error: std::sync::Arc<std::sync::Mutex<String>>,
/// If we have a pending compute notification that for some reason we weren't able to send,
/// set this to true. If this is set, calls to [`Self::maybe_reconcile`] will run a task to retry
/// sending it. This is the mechanism by which compute notifications are included in the scope
/// of state that we publish externally in an eventually consistent way.
pub(crate) pending_compute_notification: bool,
} }
#[derive(Default, Clone, Debug, Serialize)] #[derive(Default, Clone, Debug)]
pub(crate) struct IntentState { pub(crate) struct IntentState {
attached: Option<NodeId>, pub(crate) attached: Option<NodeId>,
secondary: Vec<NodeId>, pub(crate) secondary: Vec<NodeId>,
} }
impl IntentState { #[derive(Default, Clone)]
pub(crate) fn new() -> Self {
Self {
attached: None,
secondary: vec![],
}
}
pub(crate) fn single(scheduler: &mut Scheduler, node_id: Option<NodeId>) -> Self {
if let Some(node_id) = node_id {
scheduler.node_inc_ref(node_id);
}
Self {
attached: node_id,
secondary: vec![],
}
}
pub(crate) fn set_attached(&mut self, scheduler: &mut Scheduler, new_attached: Option<NodeId>) {
if self.attached != new_attached {
if let Some(old_attached) = self.attached.take() {
scheduler.node_dec_ref(old_attached);
}
if let Some(new_attached) = &new_attached {
scheduler.node_inc_ref(*new_attached);
}
self.attached = new_attached;
}
}
/// Like set_attached, but the node is from [`Self::secondary`]. This swaps the node from
/// secondary to attached while maintaining the scheduler's reference counts.
pub(crate) fn promote_attached(
&mut self,
_scheduler: &mut Scheduler,
promote_secondary: NodeId,
) {
// If we call this with a node that isn't in secondary, it would cause incorrect
// scheduler reference counting, since we assume the node is already referenced as a secondary.
debug_assert!(self.secondary.contains(&promote_secondary));
// TODO: when scheduler starts tracking attached + secondary counts separately, we will
// need to call into it here.
self.secondary.retain(|n| n != &promote_secondary);
self.attached = Some(promote_secondary);
}
pub(crate) fn push_secondary(&mut self, scheduler: &mut Scheduler, new_secondary: NodeId) {
debug_assert!(!self.secondary.contains(&new_secondary));
scheduler.node_inc_ref(new_secondary);
self.secondary.push(new_secondary);
}
/// It is legal to call this with a node that is not currently a secondary: that is a no-op
pub(crate) fn remove_secondary(&mut self, scheduler: &mut Scheduler, node_id: NodeId) {
let index = self.secondary.iter().position(|n| *n == node_id);
if let Some(index) = index {
scheduler.node_dec_ref(node_id);
self.secondary.remove(index);
}
}
pub(crate) fn clear_secondary(&mut self, scheduler: &mut Scheduler) {
for secondary in self.secondary.drain(..) {
scheduler.node_dec_ref(secondary);
}
}
/// Remove the last secondary node from the list of secondaries
pub(crate) fn pop_secondary(&mut self, scheduler: &mut Scheduler) {
if let Some(node_id) = self.secondary.pop() {
scheduler.node_dec_ref(node_id);
}
}
pub(crate) fn clear(&mut self, scheduler: &mut Scheduler) {
if let Some(old_attached) = self.attached.take() {
scheduler.node_dec_ref(old_attached);
}
self.clear_secondary(scheduler);
}
pub(crate) fn all_pageservers(&self) -> Vec<NodeId> {
let mut result = Vec::new();
if let Some(p) = self.attached {
result.push(p)
}
result.extend(self.secondary.iter().copied());
result
}
pub(crate) fn get_attached(&self) -> &Option<NodeId> {
&self.attached
}
pub(crate) fn get_secondary(&self) -> &Vec<NodeId> {
&self.secondary
}
/// If the node is in use as the attached location, demote it into
/// the list of secondary locations. This is used when a node goes offline,
/// and we want to use a different node for attachment, but not permanently
/// forget the location on the offline node.
///
/// Returns true if a change was made
pub(crate) fn demote_attached(&mut self, node_id: NodeId) -> bool {
if self.attached == Some(node_id) {
// TODO: when scheduler starts tracking attached + secondary counts separately, we will
// need to call into it here.
self.attached = None;
self.secondary.push(node_id);
true
} else {
false
}
}
}
impl Drop for IntentState {
fn drop(&mut self) {
// Must clear before dropping, to avoid leaving stale refcounts in the Scheduler
debug_assert!(self.attached.is_none() && self.secondary.is_empty());
}
}
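
`IntentState` keeps the scheduler's per-node shard counts in sync by routing every mutation through methods that increment or decrement a reference count, and its `Drop` impl asserts (in debug builds) that the intent was cleared first. A simplified model of that contract, with the scheduler reduced to a counter map:

```rust
use std::collections::HashMap;

type NodeId = u64;

/// Stand-in for the scheduler: just tracks how many shards reference each node.
#[derive(Default, Debug)]
struct Scheduler {
    shard_counts: HashMap<NodeId, usize>,
}

impl Scheduler {
    fn node_inc_ref(&mut self, node: NodeId) {
        *self.shard_counts.entry(node).or_insert(0) += 1;
    }
    fn node_dec_ref(&mut self, node: NodeId) {
        if let Some(count) = self.shard_counts.get_mut(&node) {
            *count -= 1;
        }
    }
}

/// Every mutation goes through a method that keeps the scheduler's counters in
/// sync, and the shard must be explicitly cleared before it is dropped.
#[derive(Default)]
struct IntentState {
    attached: Option<NodeId>,
    secondary: Vec<NodeId>,
}

impl IntentState {
    fn set_attached(&mut self, scheduler: &mut Scheduler, new_attached: Option<NodeId>) {
        if self.attached == new_attached {
            return;
        }
        if let Some(old) = self.attached.take() {
            scheduler.node_dec_ref(old);
        }
        if let Some(new) = new_attached {
            scheduler.node_inc_ref(new);
        }
        self.attached = new_attached;
    }

    fn push_secondary(&mut self, scheduler: &mut Scheduler, node: NodeId) {
        scheduler.node_inc_ref(node);
        self.secondary.push(node);
    }

    fn clear(&mut self, scheduler: &mut Scheduler) {
        if let Some(old) = self.attached.take() {
            scheduler.node_dec_ref(old);
        }
        for node in self.secondary.drain(..) {
            scheduler.node_dec_ref(node);
        }
    }
}

impl Drop for IntentState {
    fn drop(&mut self) {
        // Dropping a non-empty intent would leak reference counts in the scheduler.
        debug_assert!(self.attached.is_none() && self.secondary.is_empty());
    }
}

fn main() {
    let mut scheduler = Scheduler::default();
    let mut intent = IntentState::default();
    intent.set_attached(&mut scheduler, Some(1));
    intent.push_secondary(&mut scheduler, 2);
    assert_eq!(scheduler.shard_counts[&1], 1);
    intent.clear(&mut scheduler);
    assert_eq!(scheduler.shard_counts[&1], 0);
}
```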
#[derive(Default, Clone, Serialize)]
pub(crate) struct ObservedState { pub(crate) struct ObservedState {
pub(crate) locations: HashMap<NodeId, ObservedStateLocation>, pub(crate) locations: HashMap<NodeId, ObservedStateLocation>,
} }
@@ -265,7 +93,7 @@ pub(crate) struct ObservedState {
/// what it is (e.g. we failed partway through configuring it) /// what it is (e.g. we failed partway through configuring it)
/// * Instance exists with conf==Some: this tells us what we last successfully configured on this node, /// * Instance exists with conf==Some: this tells us what we last successfully configured on this node,
/// and that configuration will still be present unless something external interfered. /// and that configuration will still be present unless something external interfered.
#[derive(Clone, Serialize)] #[derive(Clone)]
pub(crate) struct ObservedStateLocation { pub(crate) struct ObservedStateLocation {
/// If None, it means we do not know the status of this shard's location on this node, but /// If None, it means we do not know the status of this shard's location on this node, but
/// we know that we might have some state on this node. /// we know that we might have some state on this node.
@@ -334,11 +162,41 @@ pub(crate) struct ReconcileResult {
pub(crate) result: Result<(), ReconcileError>, pub(crate) result: Result<(), ReconcileError>,
pub(crate) tenant_shard_id: TenantShardId, pub(crate) tenant_shard_id: TenantShardId,
pub(crate) generation: Option<Generation>, pub(crate) generation: Generation,
pub(crate) observed: ObservedState, pub(crate) observed: ObservedState,
}
/// Set [`TenantState::pending_compute_notification`] from this flag impl IntentState {
pub(crate) pending_compute_notification: bool, pub(crate) fn new() -> Self {
Self {
attached: None,
secondary: vec![],
}
}
pub(crate) fn all_pageservers(&self) -> Vec<NodeId> {
let mut result = Vec::new();
if let Some(p) = self.attached {
result.push(p)
}
result.extend(self.secondary.iter().copied());
result
}
/// When a node goes offline, we update intents to avoid using it
/// as their attached pageserver.
///
/// Returns true if a change was made
pub(crate) fn notify_offline(&mut self, node_id: NodeId) -> bool {
if self.attached == Some(node_id) {
self.attached = None;
self.secondary.push(node_id);
true
} else {
false
}
}
} }
impl ObservedState { impl ObservedState {
@@ -359,17 +217,15 @@ impl TenantState {
tenant_shard_id, tenant_shard_id,
policy, policy,
intent: IntentState::default(), intent: IntentState::default(),
generation: Some(Generation::new(0)), generation: Generation::new(0),
shard, shard,
observed: ObservedState::default(), observed: ObservedState::default(),
config: TenantConfig::default(), config: TenantConfig::default(),
reconciler: None, reconciler: None,
splitting: SplitState::Idle,
sequence: Sequence(1), sequence: Sequence(1),
waiter: Arc::new(SeqWait::new(Sequence(0))), waiter: Arc::new(SeqWait::new(Sequence(0))),
error_waiter: Arc::new(SeqWait::new(Sequence(0))), error_waiter: Arc::new(SeqWait::new(Sequence(0))),
last_error: Arc::default(), last_error: Arc::default(),
pending_compute_notification: false,
} }
} }
@@ -377,7 +233,7 @@ impl TenantState {
/// [`ObservedState`], even if it violates my [`PlacementPolicy`]. Call [`Self::schedule`] next, /// [`ObservedState`], even if it violates my [`PlacementPolicy`]. Call [`Self::schedule`] next,
/// to get an intent state that complies with placement policy. The overall goal is to do scheduling /// to get an intent state that complies with placement policy. The overall goal is to do scheduling
/// in a way that makes use of any configured locations that already exist in the outside world. /// in a way that makes use of any configured locations that already exist in the outside world.
pub(crate) fn intent_from_observed(&mut self, scheduler: &mut Scheduler) { pub(crate) fn intent_from_observed(&mut self) {
// Choose an attached location by filtering observed locations, and then sorting to get the highest // Choose an attached location by filtering observed locations, and then sorting to get the highest
// generation // generation
let mut attached_locs = self let mut attached_locs = self
@@ -402,116 +258,69 @@ impl TenantState {
attached_locs.sort_by_key(|i| i.1); attached_locs.sort_by_key(|i| i.1);
if let Some((node_id, _gen)) = attached_locs.into_iter().last() { if let Some((node_id, _gen)) = attached_locs.into_iter().last() {
self.intent.set_attached(scheduler, Some(*node_id)); self.intent.attached = Some(*node_id);
} }
// All remaining observed locations generate secondary intents. This includes None // All remaining observed locations generate secondary intents. This includes None
// observations, as these may well have some local content on disk that is usable (this // observations, as these may well have some local content on disk that is usable (this
// is an edge case that might occur if we restarted during a migration or other change) // is an edge case that might occur if we restarted during a migration or other change)
//
// We may leave intent.attached empty if we didn't find any attached locations: [`Self::schedule`]
// will take care of promoting one of these secondaries to be attached.
self.observed.locations.keys().for_each(|node_id| { self.observed.locations.keys().for_each(|node_id| {
if Some(*node_id) != self.intent.attached { if Some(*node_id) != self.intent.attached {
self.intent.push_secondary(scheduler, *node_id); self.intent.secondary.push(*node_id);
} }
}); });
} }
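`intent_from_observed` sorts the observed attached locations by generation and takes the last one, i.e. the highest generation wins and every other observed location becomes a secondary. A stand-alone sketch of that selection, with integers standing in for `NodeId` and `Generation`:

```rust
use std::collections::BTreeMap;

// Given observed locations (node -> attached generation, or None if unknown),
// pick the node with the highest generation as attached; the rest become secondaries.
fn choose_attached(observed: &BTreeMap<u64, Option<u32>>) -> (Option<u64>, Vec<u64>) {
    let attached = observed
        .iter()
        .filter_map(|(node, generation)| generation.map(|g| (*node, g)))
        .max_by_key(|(_, g)| *g)
        .map(|(node, _)| node);

    let secondary = observed
        .keys()
        .copied()
        .filter(|n| Some(*n) != attached)
        .collect();

    (attached, secondary)
}

fn main() {
    let mut observed = BTreeMap::new();
    observed.insert(2u64, Some(1u32)); // stale attached location
    observed.insert(3, Some(2));       // newest attached location
    observed.insert(5, None);          // unknown state, may still hold useful data
    let (attached, secondary) = choose_attached(&observed);
    assert_eq!(attached, Some(3));
    assert_eq!(secondary, vec![2, 5]);
}
```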
/// Part of [`Self::schedule`] that is used to choose exactly one node to act as the
/// attached pageserver for a shard.
///
/// Returns whether we modified it, and the NodeId selected.
fn schedule_attached(
&mut self,
scheduler: &mut Scheduler,
) -> Result<(bool, NodeId), ScheduleError> {
// No work to do if we already have an attached tenant
if let Some(node_id) = self.intent.attached {
return Ok((false, node_id));
}
if let Some(promote_secondary) = scheduler.node_preferred(&self.intent.secondary) {
// Promote a secondary
tracing::debug!("Promoted secondary {} to attached", promote_secondary);
self.intent.promote_attached(scheduler, promote_secondary);
Ok((true, promote_secondary))
} else {
// Pick a fresh node: either we had no secondaries or none were schedulable
let node_id = scheduler.schedule_shard(&self.intent.secondary)?;
tracing::debug!("Selected {} as attached", node_id);
self.intent.set_attached(scheduler, Some(node_id));
Ok((true, node_id))
}
}
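`schedule_attached` prefers promoting an existing secondary over asking the scheduler for a brand-new node. A simplified sketch of that decision; `Scheduler` here is a toy stand-in, not the crate's type:

```rust
struct Scheduler {
    free_nodes: Vec<u64>,
}

impl Scheduler {
    // Prefer an existing secondary (the real code also checks node availability).
    fn node_preferred(&self, candidates: &[u64]) -> Option<u64> {
        candidates.first().copied()
    }
    // Pick any node not already used by this shard.
    fn schedule_shard(&self, exclude: &[u64]) -> Option<u64> {
        self.free_nodes.iter().copied().find(|n| !exclude.contains(n))
    }
}

fn schedule_attached(
    attached: &mut Option<u64>,
    secondary: &mut Vec<u64>,
    scheduler: &Scheduler,
) -> Option<(bool, u64)> {
    if let Some(node) = *attached {
        return Some((false, node)); // already have an attached location
    }
    if let Some(promoted) = scheduler.node_preferred(secondary) {
        secondary.retain(|n| *n != promoted); // promote a secondary
        *attached = Some(promoted);
        Some((true, promoted))
    } else {
        let node = scheduler.schedule_shard(secondary)?; // fall back to a fresh node
        *attached = Some(node);
        Some((true, node))
    }
}

fn main() {
    let scheduler = Scheduler { free_nodes: vec![10, 11] };
    let mut attached = None;
    let mut secondary = vec![11];
    let result = schedule_attached(&mut attached, &mut secondary, &scheduler);
    assert_eq!(result, Some((true, 11))); // the existing secondary was promoted
    assert_eq!(attached, Some(11));
    assert!(secondary.is_empty());
}
```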
pub(crate) fn schedule(&mut self, scheduler: &mut Scheduler) -> Result<(), ScheduleError> { pub(crate) fn schedule(&mut self, scheduler: &mut Scheduler) -> Result<(), ScheduleError> {
// TODO: before scheduling new nodes, check if any existing content in // TODO: before scheduling new nodes, check if any existing content in
// self.intent refers to pageservers that are offline, and pick other // self.intent refers to pageservers that are offline, and pick other
// pageservers if so. // pageservers if so.
// TODO: respect the splitting bit on tenants: if they are currently splitting then we may not
// change their attach location.
// Build the set of pageservers already in use by this tenant, to avoid scheduling // Build the set of pageservers already in use by this tenant, to avoid scheduling
// more work on the same pageservers we're already using. // more work on the same pageservers we're already using.
let mut used_pageservers = self.intent.all_pageservers();
let mut modified = false; let mut modified = false;
// Add/remove nodes to fulfil policy
use PlacementPolicy::*; use PlacementPolicy::*;
match self.policy { match self.policy {
Attached(secondary_count) => { Single => {
let retain_secondaries = if self.intent.attached.is_none() // Should have exactly one attached, and zero secondaries
&& scheduler.node_preferred(&self.intent.secondary).is_some() if self.intent.attached.is_none() {
{ let node_id = scheduler.schedule_shard(&used_pageservers)?;
// If we have no attached, and one of the secondaries is eligible to be promoted, retain self.intent.attached = Some(*node_id);
// one more secondary than we usually would, as one of them will become attached further down this function. used_pageservers.push(node_id);

secondary_count + 1 modified = true;
} else { }
secondary_count if !self.intent.secondary.is_empty() {
}; self.intent.secondary.clear();
modified = true;
while self.intent.secondary.len() > retain_secondaries { }
// We have no particular preference for one secondary location over another: just }
// arbitrarily drop from the end Double(secondary_count) => {
self.intent.pop_secondary(scheduler); // Should have exactly one attached, and N secondaries
if self.intent.attached.is_none() {
let node_id = scheduler.schedule_shard(&used_pageservers)?;
self.intent.attached = Some(node_id);
used_pageservers.push(node_id);
modified = true; modified = true;
} }
// Should have exactly one attached, and N secondaries
let (modified_attached, attached_node_id) = self.schedule_attached(scheduler)?;
modified |= modified_attached;
let mut used_pageservers = vec![attached_node_id];
while self.intent.secondary.len() < secondary_count { while self.intent.secondary.len() < secondary_count {
let node_id = scheduler.schedule_shard(&used_pageservers)?; let node_id = scheduler.schedule_shard(&used_pageservers)?;
self.intent.push_secondary(scheduler, node_id); self.intent.secondary.push(node_id);
used_pageservers.push(node_id); used_pageservers.push(node_id);
modified = true; modified = true;
} }
} }
Secondary => {
if let Some(node_id) = self.intent.get_attached() {
// Populate secondary by demoting the attached node
self.intent.demote_attached(*node_id);
modified = true;
} else if self.intent.secondary.is_empty() {
// Populate secondary by scheduling a fresh node
let node_id = scheduler.schedule_shard(&[])?;
self.intent.push_secondary(scheduler, node_id);
modified = true;
}
while self.intent.secondary.len() > 1 {
// We have no particular preference for one secondary location over another: just
// arbitrarily drop from the end
self.intent.pop_secondary(scheduler);
modified = true;
}
}
Detached => { Detached => {
// Never add locations in this mode // Should have no attached or secondary pageservers
if self.intent.get_attached().is_some() || !self.intent.get_secondary().is_empty() { if self.intent.attached.is_some() {
self.intent.clear(scheduler); self.intent.attached = None;
modified = true;
}
if !self.intent.secondary.is_empty() {
self.intent.secondary.clear();
modified = true; modified = true;
} }
} }
@@ -524,57 +333,13 @@ impl TenantState {
Ok(()) Ok(())
} }
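For orientation, these are the steady-state location counts each placement policy converges to (the `Attached` arm above also transiently retains one extra secondary while a promotion is pending). Illustrative only; the variant names mirror the diff:

```rust
enum PlacementPolicy {
    Attached(usize), // one attached location plus N secondaries
    Secondary,       // no attached location, exactly one secondary
    Detached,        // no locations at all
}

// (attached locations, secondary locations) a shard is scheduled towards.
fn target_counts(policy: &PlacementPolicy) -> (usize, usize) {
    match policy {
        PlacementPolicy::Attached(n) => (1, *n),
        PlacementPolicy::Secondary => (0, 1),
        PlacementPolicy::Detached => (0, 0),
    }
}

fn main() {
    assert_eq!(target_counts(&PlacementPolicy::Attached(1)), (1, 1));
    assert_eq!(target_counts(&PlacementPolicy::Secondary), (0, 1));
    assert_eq!(target_counts(&PlacementPolicy::Detached), (0, 0));
}
```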
/// Query whether the tenant's observed state for attached node matches its intent state, and if so, fn dirty(&self) -> bool {
/// yield the node ID. This is appropriate for emitting compute hook notifications: we are checking that
/// the node in question is not only where we intend to attach, but that the tenant is indeed already attached there.
///
/// Reconciliation may still be needed for other aspects of state such as secondaries (see [`Self::dirty`]): this
/// function should not be used to decide whether to reconcile.
pub(crate) fn stably_attached(&self) -> Option<NodeId> {
if let Some(attach_intent) = self.intent.attached {
match self.observed.locations.get(&attach_intent) {
Some(loc) => match &loc.conf {
Some(conf) => match conf.mode {
LocationConfigMode::AttachedMulti
| LocationConfigMode::AttachedSingle
| LocationConfigMode::AttachedStale => {
// Our intent and observed state agree that this node is in an attached state.
Some(attach_intent)
}
// Our observed config is not an attached state
_ => None,
},
// Our observed state is None, i.e. in flux
None => None,
},
// We have no observed state for this node
None => None,
}
} else {
// Our intent is not to attach
None
}
}
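`stably_attached` only reports a node when the intent to attach there is confirmed by an observed attached mode; any ambiguity yields `None`. A simplified, self-contained version of that check:

```rust
// Simplified stand-ins for LocationConfigMode and the observed-state lookup.
enum Mode {
    AttachedSingle,
    AttachedMulti,
    AttachedStale,
    Secondary,
}

fn stably_attached<F>(intent_attached: Option<u64>, observed_mode: F) -> Option<u64>
where
    F: Fn(u64) -> Option<Mode>,
{
    let node = intent_attached?; // no intent to attach
    match observed_mode(node)? { // None = observed state unknown / in flux
        Mode::AttachedSingle | Mode::AttachedMulti | Mode::AttachedStale => Some(node),
        Mode::Secondary => None, // observed, but not in an attached mode
    }
}

fn main() {
    assert_eq!(stably_attached(Some(7), |_| Some(Mode::AttachedSingle)), Some(7));
    assert_eq!(stably_attached(Some(7), |_| Some(Mode::AttachedStale)), Some(7));
    assert_eq!(stably_attached(Some(7), |_| Some(Mode::Secondary)), None);
    assert_eq!(stably_attached(Some(7), |_| None), None);
    assert_eq!(stably_attached(None, |_| Some(Mode::AttachedMulti)), None);
}
```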
fn dirty(&self, nodes: &Arc<HashMap<NodeId, Node>>) -> bool {
let mut dirty_nodes = HashSet::new();
if let Some(node_id) = self.intent.attached { if let Some(node_id) = self.intent.attached {
// Maybe panic: it is a severe bug if we try to attach while generation is null. let wanted_conf = attached_location_conf(self.generation, &self.shard, &self.config);
let generation = self
.generation
.expect("Attempted to enter attached state without a generation");
let wanted_conf = attached_location_conf(
generation,
&self.shard,
&self.config,
!self.intent.secondary.is_empty(),
);
match self.observed.locations.get(&node_id) { match self.observed.locations.get(&node_id) {
Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {} Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {}
Some(_) | None => { Some(_) | None => {
dirty_nodes.insert(node_id); return true;
} }
} }
} }
@@ -584,39 +349,21 @@ impl TenantState {
match self.observed.locations.get(node_id) { match self.observed.locations.get(node_id) {
Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {} Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {}
Some(_) | None => { Some(_) | None => {
dirty_nodes.insert(*node_id); return true;
} }
} }
} }
for node_id in self.observed.locations.keys() { false
if self.intent.attached != Some(*node_id) && !self.intent.secondary.contains(node_id) {
// We have observed state that isn't part of our intent: need to clean it up.
dirty_nodes.insert(*node_id);
}
}
dirty_nodes.retain(|node_id| {
nodes
.get(node_id)
.map(|n| n.is_available())
.unwrap_or(false)
});
!dirty_nodes.is_empty()
} }
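The reworked `dirty` only treats a mismatch as actionable when the node involved is currently available, so offline nodes don't trigger pointless reconciles. A compact sketch of that filter with simplified types:

```rust
use std::collections::{HashMap, HashSet};

// A shard with mismatched locations is only "dirty" if at least one of those
// locations is on a node that is currently available.
fn is_dirty(mismatched: &HashSet<u64>, available: &HashMap<u64, bool>) -> bool {
    mismatched
        .iter()
        .any(|node| available.get(node).copied().unwrap_or(false))
}

fn main() {
    let mismatched: HashSet<u64> = [1, 2].into_iter().collect();
    let mut available = HashMap::new();
    available.insert(1u64, false); // offline: ignore the mismatch for now
    available.insert(2, false);
    assert!(!is_dirty(&mismatched, &available));
    available.insert(2, true); // node came back: now we need to reconcile
    assert!(is_dirty(&mismatched, &available));
}
```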
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
pub(crate) fn maybe_reconcile( pub(crate) fn maybe_reconcile(
&mut self, &mut self,
result_tx: &tokio::sync::mpsc::UnboundedSender<ReconcileResult>, result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResult>,
pageservers: &Arc<HashMap<NodeId, Node>>, pageservers: &Arc<HashMap<NodeId, Node>>,
compute_hook: &Arc<ComputeHook>, compute_hook: &Arc<ComputeHook>,
service_config: &service::Config, service_config: &service::Config,
persistence: &Arc<Persistence>, persistence: &Arc<Persistence>,
gate: &Gate,
cancel: &CancellationToken,
) -> Option<ReconcilerWaiter> { ) -> Option<ReconcilerWaiter> {
// If there are any ambiguous observed states, and the nodes they refer to are available, // If there are any ambiguous observed states, and the nodes they refer to are available,
// we should reconcile to clean them up. // we should reconcile to clean them up.
@@ -625,39 +372,22 @@ impl TenantState {
let node = pageservers let node = pageservers
.get(node_id) .get(node_id)
.expect("Nodes may not be removed while referenced"); .expect("Nodes may not be removed while referenced");
if observed_loc.conf.is_none() && node.is_available() { if observed_loc.conf.is_none()
&& !matches!(node.availability, NodeAvailability::Offline)
{
dirty_observed = true; dirty_observed = true;
break; break;
} }
} }
let active_nodes_dirty = self.dirty(pageservers); if !self.dirty() && !dirty_observed {
// Even if there is no pageserver work to be done, if we have a pending notification to computes,
// wake up a reconciler to send it.
let do_reconcile =
active_nodes_dirty || dirty_observed || self.pending_compute_notification;
if !do_reconcile {
tracing::info!("Not dirty, no reconciliation needed."); tracing::info!("Not dirty, no reconciliation needed.");
return None; return None;
} }
// If we are currently splitting, then never start a reconciler task: the splitting logic
// requires that shards are not interfered with while it runs. Do this check here rather than
// up top, so that we only log this message if we would otherwise have done a reconciliation.
if !matches!(self.splitting, SplitState::Idle) {
tracing::info!("Refusing to reconcile, splitting in progress");
return None;
}
// Reconcile already in flight for the current sequence? // Reconcile already in flight for the current sequence?
if let Some(handle) = &self.reconciler { if let Some(handle) = &self.reconciler {
if handle.sequence == self.sequence { if handle.sequence == self.sequence {
tracing::info!(
"Reconciliation already in progress for sequence {:?}",
self.sequence,
);
return Some(ReconcilerWaiter { return Some(ReconcilerWaiter {
tenant_shard_id: self.tenant_shard_id, tenant_shard_id: self.tenant_shard_id,
seq_wait: self.waiter.clone(), seq_wait: self.waiter.clone(),
@@ -668,131 +398,62 @@ impl TenantState {
} }
} }
// Build list of nodes from which the reconciler should detach
let mut detach = Vec::new();
for node_id in self.observed.locations.keys() {
if self.intent.get_attached() != &Some(*node_id)
&& !self.intent.secondary.contains(node_id)
{
detach.push(
pageservers
.get(node_id)
.expect("Intent references non-existent pageserver")
.clone(),
)
}
}
// Reconcile in flight for a stale sequence? Our sequence's task will wait for it before // Reconcile in flight for a stale sequence? Our sequence's task will wait for it before
// doing our sequence's work. // doing our sequence's work.
let old_handle = self.reconciler.take(); let old_handle = self.reconciler.take();
let Ok(gate_guard) = gate.enter() else { let cancel = CancellationToken::new();
// Shutting down, don't start a reconciler
return None;
};
// Advance the sequence before spawning a reconciler, so that sequence waiters
// can distinguish between before+after the reconcile completes.
self.sequence = self.sequence.next();
let reconciler_cancel = cancel.child_token();
let reconciler_intent = TargetState::from_intent(pageservers, &self.intent);
let mut reconciler = Reconciler { let mut reconciler = Reconciler {
tenant_shard_id: self.tenant_shard_id, tenant_shard_id: self.tenant_shard_id,
shard: self.shard, shard: self.shard,
generation: self.generation, generation: self.generation,
intent: reconciler_intent, intent: self.intent.clone(),
detach,
config: self.config.clone(), config: self.config.clone(),
observed: self.observed.clone(), observed: self.observed.clone(),
pageservers: pageservers.clone(),
compute_hook: compute_hook.clone(), compute_hook: compute_hook.clone(),
service_config: service_config.clone(), service_config: service_config.clone(),
_gate_guard: gate_guard, cancel: cancel.clone(),
cancel: reconciler_cancel.clone(),
persistence: persistence.clone(), persistence: persistence.clone(),
compute_notify_failure: false,
}; };
let reconcile_seq = self.sequence; let reconcile_seq = self.sequence;
tracing::info!(seq=%reconcile_seq, "Spawning Reconciler for sequence {}", self.sequence); tracing::info!("Spawning Reconciler for sequence {}", self.sequence);
let must_notify = self.pending_compute_notification; let join_handle = tokio::task::spawn(async move {
let reconciler_span = tracing::info_span!(parent: None, "reconciler", seq=%reconcile_seq, // Wait for any previous reconcile task to complete before we start
tenant_id=%reconciler.tenant_shard_id.tenant_id, if let Some(old_handle) = old_handle {
shard_id=%reconciler.tenant_shard_id.shard_slug()); old_handle.cancel.cancel();
metrics::METRICS_REGISTRY if let Err(e) = old_handle.handle.await {
.metrics_group // We can't do much with this other than log it: the task is done, so
.storage_controller_reconcile_spawn // we may proceed with our work.
.inc(); tracing::error!("Unexpected join error waiting for reconcile task: {e}");
let result_tx = result_tx.clone();
let join_handle = tokio::task::spawn(
async move {
// Wait for any previous reconcile task to complete before we start
if let Some(old_handle) = old_handle {
old_handle.cancel.cancel();
if let Err(e) = old_handle.handle.await {
// We can't do much with this other than log it: the task is done, so
// we may proceed with our work.
tracing::error!("Unexpected join error waiting for reconcile task: {e}");
}
} }
// Early check for cancellation before doing any work
// TODO: wrap all remote API operations in cancellation check
// as well.
if reconciler.cancel.is_cancelled() {
metrics::METRICS_REGISTRY
.metrics_group
.storage_controller_reconcile_complete
.inc(ReconcileCompleteLabelGroup {
status: ReconcileOutcome::Cancel,
});
return;
}
// Attempt to make observed state match intent state
let result = reconciler.reconcile().await;
// If we know we had a pending compute notification from some previous action, send a notification irrespective
// of whether the above reconcile() did any work
if result.is_ok() && must_notify {
// If this fails we will send the need to retry in [`ReconcileResult::pending_compute_notification`]
reconciler.compute_notify().await.ok();
}
// Update result counter
let outcome_label = match &result {
Ok(_) => ReconcileOutcome::Success,
Err(ReconcileError::Cancel) => ReconcileOutcome::Cancel,
Err(_) => ReconcileOutcome::Error,
};
metrics::METRICS_REGISTRY
.metrics_group
.storage_controller_reconcile_complete
.inc(ReconcileCompleteLabelGroup {
status: outcome_label,
});
result_tx
.send(ReconcileResult {
sequence: reconcile_seq,
result,
tenant_shard_id: reconciler.tenant_shard_id,
generation: reconciler.generation,
observed: reconciler.observed,
pending_compute_notification: reconciler.compute_notify_failure,
})
.ok();
} }
.instrument(reconciler_span),
); // Early check for cancellation before doing any work
// TODO: wrap all remote API operations in cancellation check
// as well.
if reconciler.cancel.is_cancelled() {
return;
}
let result = reconciler.reconcile().await;
result_tx
.send(ReconcileResult {
sequence: reconcile_seq,
result,
tenant_shard_id: reconciler.tenant_shard_id,
generation: reconciler.generation,
observed: reconciler.observed,
})
.ok();
});
self.reconciler = Some(ReconcilerHandle { self.reconciler = Some(ReconcilerHandle {
sequence: self.sequence, sequence: self.sequence,
handle: join_handle, handle: join_handle,
cancel: reconciler_cancel, cancel,
}); });
Some(ReconcilerWaiter { Some(ReconcilerWaiter {
@@ -803,181 +464,4 @@ impl TenantState {
seq: self.sequence, seq: self.sequence,
}) })
} }
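The spawned task above follows a hand-off pattern: cancel the previous reconciler, await its join handle, bail out early on shutdown, then do this sequence's work. A stripped-down sketch using tokio and tokio-util's `CancellationToken`, as the real code does; everything else here is simplified:

```rust
use tokio_util::sync::CancellationToken;

struct OldHandle {
    cancel: CancellationToken,
    handle: tokio::task::JoinHandle<()>,
}

async fn run_reconcile(old: Option<OldHandle>, cancel: CancellationToken) {
    if let Some(old) = old {
        old.cancel.cancel();
        if let Err(e) = old.handle.await {
            // Nothing useful to do beyond logging: that task is finished either way.
            eprintln!("unexpected join error waiting for reconcile task: {e}");
        }
    }
    if cancel.is_cancelled() {
        return; // shutting down, don't start new work
    }
    // ... reconcile(), optional compute notification, then send a ReconcileResult ...
}

#[tokio::main]
async fn main() {
    let old_cancel = CancellationToken::new();
    let old = OldHandle {
        cancel: old_cancel.clone(),
        handle: tokio::spawn(async move { old_cancel.cancelled().await }),
    };
    run_reconcile(Some(old), CancellationToken::new()).await;
}
```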
/// Called when a ReconcileResult has been emitted and the service is updating
/// our state: if the result is from a sequence >= my ReconcileHandle, then drop
/// the handle to indicate there is no longer a reconciliation in progress.
pub(crate) fn reconcile_complete(&mut self, sequence: Sequence) {
if let Some(reconcile_handle) = &self.reconciler {
if reconcile_handle.sequence <= sequence {
self.reconciler = None;
}
}
}
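`reconcile_complete` clears the in-flight handle only for results whose sequence is at least as new as the handle's own, so late results from superseded runs are ignored. A tiny self-contained version:

```rust
struct ReconcilerHandle {
    sequence: u64,
}

fn reconcile_complete(reconciler: &mut Option<ReconcilerHandle>, result_sequence: u64) {
    if let Some(handle) = &*reconciler {
        if handle.sequence <= result_sequence {
            *reconciler = None; // that run (or an older one) has finished
        }
    }
}

fn main() {
    let mut slot = Some(ReconcilerHandle { sequence: 5 });
    reconcile_complete(&mut slot, 4); // stale result: keep waiting
    assert!(slot.is_some());
    reconcile_complete(&mut slot, 5); // current result: done
    assert!(slot.is_none());
}
```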
// If we had any state at all referring to this node ID, drop it. Does not
// attempt to reschedule.
pub(crate) fn deref_node(&mut self, node_id: NodeId) {
if self.intent.attached == Some(node_id) {
self.intent.attached = None;
}
self.intent.secondary.retain(|n| n != &node_id);
self.observed.locations.remove(&node_id);
debug_assert!(!self.intent.all_pageservers().contains(&node_id));
}
pub(crate) fn to_persistent(&self) -> TenantShardPersistence {
TenantShardPersistence {
tenant_id: self.tenant_shard_id.tenant_id.to_string(),
shard_number: self.tenant_shard_id.shard_number.0 as i32,
shard_count: self.tenant_shard_id.shard_count.literal() as i32,
shard_stripe_size: self.shard.stripe_size.0 as i32,
generation: self.generation.map(|g| g.into().unwrap_or(0) as i32),
generation_pageserver: self.intent.get_attached().map(|n| n.0 as i64),
placement_policy: serde_json::to_string(&self.policy).unwrap(),
config: serde_json::to_string(&self.config).unwrap(),
splitting: SplitState::default(),
}
}
}
#[cfg(test)]
pub(crate) mod tests {
use pageserver_api::{
controller_api::NodeAvailability,
shard::{ShardCount, ShardNumber},
};
use utils::id::TenantId;
use crate::scheduler::test_utils::make_test_nodes;
use super::*;
fn make_test_tenant_shard(policy: PlacementPolicy) -> TenantState {
let tenant_id = TenantId::generate();
let shard_number = ShardNumber(0);
let shard_count = ShardCount::new(1);
let tenant_shard_id = TenantShardId {
tenant_id,
shard_number,
shard_count,
};
TenantState::new(
tenant_shard_id,
ShardIdentity::new(
shard_number,
shard_count,
pageserver_api::shard::ShardStripeSize(32768),
)
.unwrap(),
policy,
)
}
/// Test the scheduling behaviors used when a tenant configured for HA is subject
/// to nodes being marked offline.
#[test]
fn tenant_ha_scheduling() -> anyhow::Result<()> {
// Start with three nodes. Our tenant will only use two. The third one is
// expected to remain unused.
let mut nodes = make_test_nodes(3);
let mut scheduler = Scheduler::new(nodes.values());
let mut tenant_state = make_test_tenant_shard(PlacementPolicy::Attached(1));
tenant_state
.schedule(&mut scheduler)
.expect("we have enough nodes, scheduling should work");
// Expect to initially be scheduled onto different nodes
assert_eq!(tenant_state.intent.secondary.len(), 1);
assert!(tenant_state.intent.attached.is_some());
let attached_node_id = tenant_state.intent.attached.unwrap();
let secondary_node_id = *tenant_state.intent.secondary.iter().last().unwrap();
assert_ne!(attached_node_id, secondary_node_id);
// Notifying the attached node is offline should demote it to a secondary
let changed = tenant_state.intent.demote_attached(attached_node_id);
assert!(changed);
assert!(tenant_state.intent.attached.is_none());
assert_eq!(tenant_state.intent.secondary.len(), 2);
// Update the scheduler state to indicate the node is offline
nodes
.get_mut(&attached_node_id)
.unwrap()
.set_availability(NodeAvailability::Offline);
scheduler.node_upsert(nodes.get(&attached_node_id).unwrap());
// Scheduling the node should promote the still-available secondary node to attached
tenant_state
.schedule(&mut scheduler)
.expect("active nodes are available");
assert_eq!(tenant_state.intent.attached.unwrap(), secondary_node_id);
// The original attached node should have been retained as a secondary
assert_eq!(
*tenant_state.intent.secondary.iter().last().unwrap(),
attached_node_id
);
tenant_state.intent.clear(&mut scheduler);
Ok(())
}
#[test]
fn intent_from_observed() -> anyhow::Result<()> {
let nodes = make_test_nodes(3);
let mut scheduler = Scheduler::new(nodes.values());
let mut tenant_state = make_test_tenant_shard(PlacementPolicy::Attached(1));
tenant_state.observed.locations.insert(
NodeId(3),
ObservedStateLocation {
conf: Some(LocationConfig {
mode: LocationConfigMode::AttachedMulti,
generation: Some(2),
secondary_conf: None,
shard_number: tenant_state.shard.number.0,
shard_count: tenant_state.shard.count.literal(),
shard_stripe_size: tenant_state.shard.stripe_size.0,
tenant_conf: TenantConfig::default(),
}),
},
);
tenant_state.observed.locations.insert(
NodeId(2),
ObservedStateLocation {
conf: Some(LocationConfig {
mode: LocationConfigMode::AttachedStale,
generation: Some(1),
secondary_conf: None,
shard_number: tenant_state.shard.number.0,
shard_count: tenant_state.shard.count.literal(),
shard_stripe_size: tenant_state.shard.stripe_size.0,
tenant_conf: TenantConfig::default(),
}),
},
);
tenant_state.intent_from_observed(&mut scheduler);
// The attached location with the highest generation gets used as attached
assert_eq!(tenant_state.intent.attached, Some(NodeId(3)));
// Other locations get used as secondary
assert_eq!(tenant_state.intent.secondary, vec![NodeId(2)]);
scheduler.consistency_check(nodes.values(), [&tenant_state].into_iter())?;
tenant_state.intent.clear(&mut scheduler);
Ok(())
}
} }

View File

@@ -1,45 +1,40 @@
use crate::{background_process, local_env::LocalEnv}; use crate::{background_process, local_env::LocalEnv};
use camino::{Utf8Path, Utf8PathBuf}; use camino::{Utf8Path, Utf8PathBuf};
use diesel::{
backend::Backend,
query_builder::{AstPass, QueryFragment, QueryId},
Connection, PgConnection, QueryResult, RunQueryDsl,
};
use diesel_migrations::{HarnessWithOutput, MigrationHarness};
use hyper::Method; use hyper::Method;
use pageserver_api::{ use pageserver_api::{
controller_api::{ models::{ShardParameters, TenantCreateRequest, TimelineCreateRequest, TimelineInfo},
NodeConfigureRequest, NodeRegisterRequest, TenantCreateResponse, TenantLocateResponse, shard::TenantShardId,
TenantShardMigrateRequest, TenantShardMigrateResponse,
},
models::{
TenantCreateRequest, TenantShardSplitRequest, TenantShardSplitResponse,
TimelineCreateRequest, TimelineInfo,
},
shard::{ShardStripeSize, TenantShardId},
}; };
use pageserver_client::mgmt_api::ResponseErrorMessageExt; use pageserver_client::mgmt_api::ResponseErrorMessageExt;
use postgres_backend::AuthType; use postgres_backend::AuthType;
use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{fs, str::FromStr}; use std::{env, str::FromStr};
use tokio::process::Command; use tokio::process::Command;
use tracing::instrument; use tracing::instrument;
use url::Url;
use utils::{ use utils::{
auth::{encode_from_key_file, Claims, Scope}, auth::{Claims, Scope},
id::{NodeId, TenantId}, id::{NodeId, TenantId},
}; };
pub struct StorageController { pub struct AttachmentService {
env: LocalEnv, env: LocalEnv,
listen: String, listen: String,
path: Utf8PathBuf, path: Utf8PathBuf,
private_key: Option<Vec<u8>>, jwt_token: Option<String>,
public_key: Option<String>, public_key_path: Option<Utf8PathBuf>,
postgres_port: u16, postgres_port: u16,
client: reqwest::Client, client: reqwest::Client,
} }
const COMMAND: &str = "storage_controller"; const COMMAND: &str = "attachment_service";
const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16; const ATTACHMENT_SERVICE_POSTGRES_VERSION: u32 = 16;
// Use a shorter pageserver unavailability interval than the default to speed up tests.
const NEON_LOCAL_MAX_UNAVAILABLE_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10);
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct AttachHookRequest { pub struct AttachHookRequest {
@@ -62,7 +57,126 @@ pub struct InspectResponse {
pub attachment: Option<(u32, NodeId)>, pub attachment: Option<(u32, NodeId)>,
} }
impl StorageController { #[derive(Serialize, Deserialize)]
pub struct TenantCreateResponseShard {
pub node_id: NodeId,
pub generation: u32,
}
#[derive(Serialize, Deserialize)]
pub struct TenantCreateResponse {
pub shards: Vec<TenantCreateResponseShard>,
}
#[derive(Serialize, Deserialize)]
pub struct NodeRegisterRequest {
pub node_id: NodeId,
pub listen_pg_addr: String,
pub listen_pg_port: u16,
pub listen_http_addr: String,
pub listen_http_port: u16,
}
#[derive(Serialize, Deserialize)]
pub struct NodeConfigureRequest {
pub node_id: NodeId,
pub availability: Option<NodeAvailability>,
pub scheduling: Option<NodeSchedulingPolicy>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct TenantLocateResponseShard {
pub shard_id: TenantShardId,
pub node_id: NodeId,
pub listen_pg_addr: String,
pub listen_pg_port: u16,
pub listen_http_addr: String,
pub listen_http_port: u16,
}
#[derive(Serialize, Deserialize)]
pub struct TenantLocateResponse {
pub shards: Vec<TenantLocateResponseShard>,
pub shard_params: ShardParameters,
}
/// Explicitly migrating a particular shard is a low level operation
/// TODO: higher level "Reschedule tenant" operation where the request
/// specifies some constraints, e.g. asking it to get off particular node(s)
#[derive(Serialize, Deserialize, Debug)]
pub struct TenantShardMigrateRequest {
pub tenant_shard_id: TenantShardId,
pub node_id: NodeId,
}
#[derive(Serialize, Deserialize, Clone, Copy)]
pub enum NodeAvailability {
// Normal, happy state
Active,
// Offline: Tenants shouldn't try to attach here, but they may assume that their
// secondary locations on this node still exist. Newly added nodes are in this
// state until we successfully contact them.
Offline,
}
impl FromStr for NodeAvailability {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"active" => Ok(Self::Active),
"offline" => Ok(Self::Offline),
_ => Err(anyhow::anyhow!("Unknown availability state '{s}'")),
}
}
}
/// FIXME: this is a duplicate of the type in the attachment_service crate, because the
/// type needs to be defined with diesel traits in there.
#[derive(Serialize, Deserialize, Clone, Copy)]
pub enum NodeSchedulingPolicy {
Active,
Filling,
Pause,
Draining,
}
impl FromStr for NodeSchedulingPolicy {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"active" => Ok(Self::Active),
"filling" => Ok(Self::Filling),
"pause" => Ok(Self::Pause),
"draining" => Ok(Self::Draining),
_ => Err(anyhow::anyhow!("Unknown scheduling state '{s}'")),
}
}
}
impl From<NodeSchedulingPolicy> for String {
fn from(value: NodeSchedulingPolicy) -> String {
use NodeSchedulingPolicy::*;
match value {
Active => "active",
Filling => "filling",
Pause => "pause",
Draining => "draining",
}
.to_string()
}
}
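A quick round-trip through the `FromStr`/`From` impls above; this is a fragment rather than a standalone program, and assumes those impls are in scope:

```rust
// Fragment: assumes the NodeSchedulingPolicy FromStr and
// From<NodeSchedulingPolicy> impls shown above are in scope.
fn roundtrip() -> anyhow::Result<()> {
    let policy: NodeSchedulingPolicy = "draining".parse()?;
    let as_string: String = policy.into();
    assert_eq!(as_string, "draining");
    Ok(())
}
```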
#[derive(Serialize, Deserialize, Debug)]
pub struct TenantShardMigrateResponse {}
impl AttachmentService {
pub fn from_env(env: &LocalEnv) -> Self { pub fn from_env(env: &LocalEnv) -> Self {
let path = Utf8PathBuf::from_path_buf(env.base_data_dir.clone()) let path = Utf8PathBuf::from_path_buf(env.base_data_dir.clone())
.unwrap() .unwrap()
@@ -91,37 +205,19 @@ impl StorageController {
.pageservers .pageservers
.first() .first()
.expect("Config is validated to contain at least one pageserver"); .expect("Config is validated to contain at least one pageserver");
let (private_key, public_key) = match ps_conf.http_auth_type { let (jwt_token, public_key_path) = match ps_conf.http_auth_type {
AuthType::Trust => (None, None), AuthType::Trust => (None, None),
AuthType::NeonJWT => { AuthType::NeonJWT => {
let private_key_path = env.get_private_key_path(); let jwt_token = env
let private_key = fs::read(private_key_path).expect("failed to read private key"); .generate_auth_token(&Claims::new(None, Scope::PageServerApi))
.unwrap();
// If pageserver auth is enabled, this implicitly enables auth for this service, // If pageserver auth is enabled, this implicitly enables auth for this service,
// using the same credentials. // using the same credentials.
let public_key_path = let public_key_path =
camino::Utf8PathBuf::try_from(env.base_data_dir.join("auth_public_key.pem")) camino::Utf8PathBuf::try_from(env.base_data_dir.join("auth_public_key.pem"))
.unwrap(); .unwrap();
(Some(jwt_token), Some(public_key_path))
// This service takes keys as a string rather than as a path to a file/dir: read the key into memory.
let public_key = if std::fs::metadata(&public_key_path)
.expect("Can't stat public key")
.is_dir()
{
// Our config may specify a directory: this is for the pageserver's ability to handle multiple
// keys. We only use one key at a time, so, arbitrarily load the first one in the directory.
let mut dir =
std::fs::read_dir(&public_key_path).expect("Can't readdir public key path");
let dent = dir
.next()
.expect("Empty key dir")
.expect("Error reading key dir");
std::fs::read_to_string(dent.path()).expect("Can't read public key")
} else {
std::fs::read_to_string(&public_key_path).expect("Can't read public key")
};
(Some(private_key), Some(public_key))
} }
}; };
@@ -129,8 +225,8 @@ impl StorageController {
env: env.clone(), env: env.clone(),
path, path,
listen, listen,
private_key, jwt_token,
public_key, public_key_path,
postgres_port, postgres_port,
client: reqwest::ClientBuilder::new() client: reqwest::ClientBuilder::new()
.build() .build()
@@ -139,27 +235,58 @@ impl StorageController {
} }
fn pid_file(&self) -> Utf8PathBuf { fn pid_file(&self) -> Utf8PathBuf {
Utf8PathBuf::from_path_buf(self.env.base_data_dir.join("storage_controller.pid")) Utf8PathBuf::from_path_buf(self.env.base_data_dir.join("attachment_service.pid"))
.expect("non-Unicode path") .expect("non-Unicode path")
} }
/// PIDFile for the postgres instance used to store storage controller state /// PIDFile for the postgres instance used to store attachment service state
fn postgres_pid_file(&self) -> Utf8PathBuf { fn postgres_pid_file(&self) -> Utf8PathBuf {
Utf8PathBuf::from_path_buf( Utf8PathBuf::from_path_buf(
self.env self.env
.base_data_dir .base_data_dir
.join("storage_controller_postgres.pid"), .join("attachment_service_postgres.pid"),
) )
.expect("non-Unicode path") .expect("non-Unicode path")
} }
/// In order to access database migrations, we need to find the Neon source tree
async fn find_source_root(&self) -> anyhow::Result<Utf8PathBuf> {
// We assume that either the current working directory or our binary is in the source tree. The former is usually
// true for automated test runners, the latter is usually true for developer workstations. Often
// both are true, which is fine.
let candidate_start_points = [
// Current working directory
Utf8PathBuf::from_path_buf(std::env::current_dir()?).unwrap(),
// Directory containing the binary we're running inside
Utf8PathBuf::from_path_buf(env::current_exe()?.parent().unwrap().to_owned()).unwrap(),
];
// For each candidate start point, search through ancestors looking for a neon.git source tree root
for start_point in &candidate_start_points {
// Start from the build dir: assumes we are running out of a built neon source tree
for path in start_point.ancestors() {
// A crude approximation: the root of the source tree is whatever contains a "control_plane"
// subdirectory.
let control_plane = path.join("control_plane");
if tokio::fs::try_exists(&control_plane).await? {
return Ok(path.to_owned());
}
}
}
// Fall-through
Err(anyhow::anyhow!(
"Could not find control_plane src dir, after searching ancestors of {candidate_start_points:?}"
))
}
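`find_source_root` walks the ancestors of each start point until it finds a directory containing `control_plane/`. A synchronous, self-contained sketch of that walk (the real code uses `tokio::fs` and tries two start points):

```rust
use std::path::{Path, PathBuf};

// Walk up from `start` until we hit a directory that contains `control_plane/`.
fn find_source_root(start: &Path) -> Option<PathBuf> {
    start
        .ancestors()
        .find(|p| p.join("control_plane").is_dir())
        .map(Path::to_path_buf)
}

fn main() {
    if let Ok(cwd) = std::env::current_dir() {
        match find_source_root(&cwd) {
            Some(root) => println!("source root: {}", root.display()),
            None => println!("no control_plane directory found above {}", cwd.display()),
        }
    }
}
```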
/// Find the directory containing postgres binaries, such as `initdb` and `pg_ctl` /// Find the directory containing postgres binaries, such as `initdb` and `pg_ctl`
/// ///
/// This usually uses STORAGE_CONTROLLER_POSTGRES_VERSION of postgres, but will fall back /// This usually uses ATTACHMENT_SERVICE_POSTGRES_VERSION of postgres, but will fall back
/// to other versions if that one isn't found. Some automated tests create circumstances /// to other versions if that one isn't found. Some automated tests create circumstances
/// where only one version is available in pg_distrib_dir, such as `test_remote_extensions`. /// where only one version is available in pg_distrib_dir, such as `test_remote_extensions`.
pub async fn get_pg_bin_dir(&self) -> anyhow::Result<Utf8PathBuf> { pub async fn get_pg_bin_dir(&self) -> anyhow::Result<Utf8PathBuf> {
let prefer_versions = [STORAGE_CONTROLLER_POSTGRES_VERSION, 15, 14]; let prefer_versions = [ATTACHMENT_SERVICE_POSTGRES_VERSION, 15, 14];
for v in prefer_versions { for v in prefer_versions {
let path = Utf8PathBuf::from_path_buf(self.env.pg_bin_dir(v)?).unwrap(); let path = Utf8PathBuf::from_path_buf(self.env.pg_bin_dir(v)?).unwrap();
@@ -192,40 +319,77 @@ impl StorageController {
/// ///
/// Returns the database url /// Returns the database url
pub async fn setup_database(&self) -> anyhow::Result<String> { pub async fn setup_database(&self) -> anyhow::Result<String> {
const DB_NAME: &str = "storage_controller"; let database_url = format!(
let database_url = format!("postgresql://localhost:{}/{DB_NAME}", self.postgres_port); "postgresql://localhost:{}/attachment_service",
self.postgres_port
);
println!("Running attachment service database setup...");
fn change_database_of_url(database_url: &str, default_database: &str) -> (String, String) {
let base = ::url::Url::parse(database_url).unwrap();
let database = base.path_segments().unwrap().last().unwrap().to_owned();
let mut new_url = base.join(default_database).unwrap();
new_url.set_query(base.query());
(database, new_url.into())
}
let pg_bin_dir = self.get_pg_bin_dir().await?; #[derive(Debug, Clone)]
let createdb_path = pg_bin_dir.join("createdb"); pub struct CreateDatabaseStatement {
let output = Command::new(&createdb_path) db_name: String,
.args([ }
"-h",
"localhost",
"-p",
&format!("{}", self.postgres_port),
DB_NAME,
])
.output()
.await
.expect("Failed to spawn createdb");
if !output.status.success() { impl CreateDatabaseStatement {
let stderr = String::from_utf8(output.stderr).expect("Non-UTF8 output from createdb"); pub fn new(db_name: &str) -> Self {
if stderr.contains("already exists") { CreateDatabaseStatement {
tracing::info!("Database {DB_NAME} already exists"); db_name: db_name.to_owned(),
} else { }
anyhow::bail!("createdb failed with status {}: {stderr}", output.status);
} }
} }
impl<DB: Backend> QueryFragment<DB> for CreateDatabaseStatement {
fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, DB>) -> QueryResult<()> {
out.push_sql("CREATE DATABASE ");
out.push_identifier(&self.db_name)?;
Ok(())
}
}
impl<Conn> RunQueryDsl<Conn> for CreateDatabaseStatement {}
impl QueryId for CreateDatabaseStatement {
type QueryId = ();
const HAS_STATIC_QUERY_ID: bool = false;
}
if PgConnection::establish(&database_url).is_err() {
let (database, postgres_url) = change_database_of_url(&database_url, "postgres");
println!("Creating database: {database}");
let mut conn = PgConnection::establish(&postgres_url)?;
CreateDatabaseStatement::new(&database).execute(&mut conn)?;
}
let mut conn = PgConnection::establish(&database_url)?;
let migrations_dir = self
.find_source_root()
.await?
.join("control_plane/attachment_service/migrations");
let migrations = diesel_migrations::FileBasedMigrations::from_path(migrations_dir)?;
println!("Running migrations in {}", migrations.path().display());
HarnessWithOutput::write_to_stdout(&mut conn)
.run_pending_migrations(migrations)
.map(|_| ())
.map_err(|e| anyhow::anyhow!(e))?;
println!("Migrations complete");
Ok(database_url) Ok(database_url)
} }
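On the new side, `setup_database` shells out to `createdb` and treats an "already exists" failure as success, making startup idempotent. A hedged sketch of just that step; the host, port, and database name are illustrative, and it assumes tokio with its "process", "rt" and "macros" features plus a locally running Postgres:

```rust
use tokio::process::Command;

async fn ensure_database(port: u16, db_name: &str) -> anyhow::Result<()> {
    let port_str = port.to_string();
    let output = Command::new("createdb")
        .args(["-h", "localhost", "-p", port_str.as_str(), db_name])
        .output()
        .await?;

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        if stderr.contains("already exists") {
            // A previous run created it; treat this as success.
        } else {
            anyhow::bail!("createdb failed with status {}: {stderr}", output.status);
        }
    }
    Ok(())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    ensure_database(1235, "storage_controller").await
}
```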
pub async fn start(&self) -> anyhow::Result<()> { pub async fn start(&self) -> anyhow::Result<()> {
// Start a vanilla Postgres process used by the storage controller for persistence. // Start a vanilla Postgres process used by the attachment service for persistence.
let pg_data_path = Utf8PathBuf::from_path_buf(self.env.base_data_dir.clone()) let pg_data_path = Utf8PathBuf::from_path_buf(self.env.base_data_dir.clone())
.unwrap() .unwrap()
.join("storage_controller_db"); .join("attachment_service_db");
let pg_bin_dir = self.get_pg_bin_dir().await?; let pg_bin_dir = self.get_pg_bin_dir().await?;
let pg_log_path = pg_data_path.join("postgres.log"); let pg_log_path = pg_data_path.join("postgres.log");
@@ -248,7 +412,7 @@ impl StorageController {
.await?; .await?;
}; };
println!("Starting storage controller database..."); println!("Starting attachment service database...");
let db_start_args = [ let db_start_args = [
"-w", "-w",
"-D", "-D",
@@ -259,7 +423,7 @@ impl StorageController {
]; ];
background_process::start_process( background_process::start_process(
"storage_controller_db", "attachment_service_db",
&self.env.base_data_dir, &self.env.base_data_dir,
pg_bin_dir.join("pg_ctl").as_std_path(), pg_bin_dir.join("pg_ctl").as_std_path(),
db_start_args, db_start_args,
@@ -272,43 +436,29 @@ impl StorageController {
// Run migrations on every startup, in case something changed. // Run migrations on every startup, in case something changed.
let database_url = self.setup_database().await?; let database_url = self.setup_database().await?;
let max_unavailable: humantime::Duration = NEON_LOCAL_MAX_UNAVAILABLE_INTERVAL.into();
let mut args = vec![ let mut args = vec![
"-l", "-l",
&self.listen, &self.listen,
"-p", "-p",
self.path.as_ref(), self.path.as_ref(),
"--dev",
"--database-url", "--database-url",
&database_url, &database_url,
"--max-unavailable-interval",
&max_unavailable.to_string(),
] ]
.into_iter() .into_iter()
.map(|s| s.to_string()) .map(|s| s.to_string())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if let Some(private_key) = &self.private_key { if let Some(jwt_token) = &self.jwt_token {
let claims = Claims::new(None, Scope::PageServerApi);
let jwt_token =
encode_from_key_file(&claims, private_key).expect("failed to generate jwt token");
args.push(format!("--jwt-token={jwt_token}")); args.push(format!("--jwt-token={jwt_token}"));
} }
if let Some(public_key) = &self.public_key { if let Some(public_key_path) = &self.public_key_path {
args.push(format!("--public-key=\"{public_key}\"")); args.push(format!("--public-key={public_key_path}"));
}
if let Some(control_plane_compute_hook_api) = &self.env.control_plane_compute_hook_api {
args.push(format!(
"--compute-hook-url={control_plane_compute_hook_api}"
));
} }
background_process::start_process( background_process::start_process(
COMMAND, COMMAND,
&self.env.base_data_dir, &self.env.base_data_dir,
&self.env.storage_controller_bin(), &self.env.attachment_service_bin(),
args, args,
[( [(
"NEON_REPO_DIR".to_string(), "NEON_REPO_DIR".to_string(),
@@ -316,7 +466,7 @@ impl StorageController {
)], )],
background_process::InitialPidFile::Create(self.pid_file()), background_process::InitialPidFile::Create(self.pid_file()),
|| async { || async {
match self.ready().await { match self.status().await {
Ok(_) => Ok(true), Ok(_) => Ok(true),
Err(_) => Ok(false), Err(_) => Ok(false),
} }
@@ -330,10 +480,10 @@ impl StorageController {
pub async fn stop(&self, immediate: bool) -> anyhow::Result<()> { pub async fn stop(&self, immediate: bool) -> anyhow::Result<()> {
background_process::stop_process(immediate, COMMAND, &self.pid_file())?; background_process::stop_process(immediate, COMMAND, &self.pid_file())?;
let pg_data_path = self.env.base_data_dir.join("storage_controller_db"); let pg_data_path = self.env.base_data_dir.join("attachment_service_db");
let pg_bin_dir = self.get_pg_bin_dir().await?; let pg_bin_dir = self.get_pg_bin_dir().await?;
println!("Stopping storage controller database..."); println!("Stopping attachment service database...");
let pg_stop_args = ["-D", &pg_data_path.to_string_lossy(), "stop"]; let pg_stop_args = ["-D", &pg_data_path.to_string_lossy(), "stop"];
let stop_status = Command::new(pg_bin_dir.join("pg_ctl")) let stop_status = Command::new(pg_bin_dir.join("pg_ctl"))
.args(pg_stop_args) .args(pg_stop_args)
@@ -352,31 +502,17 @@ impl StorageController {
// fine that stop failed. Otherwise it is an error that stop failed. // fine that stop failed. Otherwise it is an error that stop failed.
const PG_STATUS_NOT_RUNNING: i32 = 3; const PG_STATUS_NOT_RUNNING: i32 = 3;
if Some(PG_STATUS_NOT_RUNNING) == status_exitcode.code() { if Some(PG_STATUS_NOT_RUNNING) == status_exitcode.code() {
println!("Storage controller database is already stopped"); println!("Attachment service data base is already stopped");
return Ok(()); return Ok(());
} else { } else {
anyhow::bail!("Failed to stop storage controller database: {stop_status}") anyhow::bail!("Failed to stop attachment service database: {stop_status}")
} }
} }
Ok(()) Ok(())
} }
fn get_claims_for_path(path: &str) -> anyhow::Result<Option<Claims>> { /// Simple HTTP request wrapper for calling into attachment service
let category = match path.find('/') {
Some(idx) => &path[..idx],
None => path,
};
match category {
"status" | "ready" => Ok(None),
"control" | "debug" => Ok(Some(Claims::new(None, Scope::Admin))),
"v1" => Ok(Some(Claims::new(None, Scope::PageServerApi))),
_ => Err(anyhow::anyhow!("Failed to determine claims for {}", path)),
}
}
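`get_claims_for_path` maps the first path segment to the JWT scope a request needs (or to none for health endpoints). A standalone illustration with simplified `Scope` values:

```rust
#[derive(Debug, PartialEq)]
enum Scope {
    Admin,
    PageServerApi,
}

// The first path segment decides which claims (if any) a request must carry.
fn claims_for_path(path: &str) -> Result<Option<Scope>, String> {
    let category = path.split('/').next().unwrap_or(path);
    match category {
        "status" | "ready" => Ok(None), // health endpoints need no token
        "control" | "debug" => Ok(Some(Scope::Admin)),
        "v1" => Ok(Some(Scope::PageServerApi)),
        other => Err(format!("failed to determine claims for {other}")),
    }
}

fn main() {
    assert_eq!(claims_for_path("ready"), Ok(None));
    assert_eq!(claims_for_path("control/v1/node"), Ok(Some(Scope::Admin)));
    assert_eq!(claims_for_path("v1/tenant"), Ok(Some(Scope::PageServerApi)));
    assert!(claims_for_path("unknown/path").is_err());
}
```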
/// Simple HTTP request wrapper for calling into storage controller
async fn dispatch<RQ, RS>( async fn dispatch<RQ, RS>(
&self, &self,
method: hyper::Method, method: hyper::Method,
@@ -387,30 +523,23 @@ impl StorageController {
RQ: Serialize + Sized, RQ: Serialize + Sized,
RS: DeserializeOwned + Sized, RS: DeserializeOwned + Sized,
{ {
// The configured URL has the /upcall path prefix for pageservers to use: we will strip that out let url = self
// for general purpose API access. .env
let listen_url = self.env.control_plane_api.clone().unwrap(); .control_plane_api
let url = Url::from_str(&format!( .clone()
"http://{}:{}/{path}", .unwrap()
listen_url.host_str().unwrap(), .join(&path)
listen_url.port().unwrap() .unwrap();
))
.unwrap();
let mut builder = self.client.request(method, url); let mut builder = self.client.request(method, url);
if let Some(body) = body { if let Some(body) = body {
builder = builder.json(&body) builder = builder.json(&body)
} }
if let Some(private_key) = &self.private_key { if let Some(jwt_token) = &self.jwt_token {
println!("Getting claims for path {}", path); builder = builder.header(
if let Some(required_claims) = Self::get_claims_for_path(&path)? { reqwest::header::AUTHORIZATION,
println!("Got claims {:?} for path {}", required_claims, path); format!("Bearer {jwt_token}"),
let jwt_token = encode_from_key_file(&required_claims, private_key)?; );
builder = builder.header(
reqwest::header::AUTHORIZATION,
format!("Bearer {jwt_token}"),
);
}
} }
let response = builder.send().await?; let response = builder.send().await?;
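The new `dispatch` rebuilds a general-purpose URL from just the host and port of the configured `control_plane_api`, dropping its pageserver-facing `/upcall` prefix. A small sketch of that rewrite using the `url` crate, as the new imports do:

```rust
use url::Url;

// Keep only host/port of the configured listen URL and append the requested path.
fn api_url(control_plane_api: &Url, path: &str) -> Url {
    let raw = format!(
        "http://{}:{}/{path}",
        control_plane_api.host_str().expect("listen URL has a host"),
        control_plane_api.port().expect("listen URL has a port"),
    );
    Url::parse(&raw).expect("constructed URL is valid")
}

fn main() {
    let listen: Url = "http://127.0.0.1:1234/upcall/v1/".parse().unwrap();
    let url = api_url(&listen, "control/v1/node");
    assert_eq!(url.as_str(), "http://127.0.0.1:1234/control/v1/node");
}
```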
@@ -437,7 +566,7 @@ impl StorageController {
let response = self let response = self
.dispatch::<_, AttachHookResponse>( .dispatch::<_, AttachHookResponse>(
Method::POST, Method::POST,
"debug/v1/attach-hook".to_string(), "attach-hook".to_string(),
Some(request), Some(request),
) )
.await?; .await?;
@@ -453,11 +582,7 @@ impl StorageController {
let request = InspectRequest { tenant_shard_id }; let request = InspectRequest { tenant_shard_id };
let response = self let response = self
.dispatch::<_, InspectResponse>( .dispatch::<_, InspectResponse>(Method::POST, "inspect".to_string(), Some(request))
Method::POST,
"debug/v1/inspect".to_string(),
Some(request),
)
.await?; .await?;
Ok(response.attachment) Ok(response.attachment)
@@ -474,12 +599,8 @@ impl StorageController {
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn tenant_locate(&self, tenant_id: TenantId) -> anyhow::Result<TenantLocateResponse> { pub async fn tenant_locate(&self, tenant_id: TenantId) -> anyhow::Result<TenantLocateResponse> {
self.dispatch::<(), _>( self.dispatch::<(), _>(Method::GET, format!("tenant/{tenant_id}/locate"), None)
Method::GET, .await
format!("debug/v1/tenant/{tenant_id}/locate"),
None,
)
.await
} }
#[instrument(skip(self))] #[instrument(skip(self))]
@@ -490,7 +611,7 @@ impl StorageController {
) -> anyhow::Result<TenantShardMigrateResponse> { ) -> anyhow::Result<TenantShardMigrateResponse> {
self.dispatch( self.dispatch(
Method::PUT, Method::PUT,
format!("control/v1/tenant/{tenant_shard_id}/migrate"), format!("tenant/{tenant_shard_id}/migrate"),
Some(TenantShardMigrateRequest { Some(TenantShardMigrateRequest {
tenant_shard_id, tenant_shard_id,
node_id, node_id,
@@ -499,27 +620,9 @@ impl StorageController {
.await .await
} }
#[instrument(skip(self), fields(%tenant_id, %new_shard_count))]
pub async fn tenant_split(
&self,
tenant_id: TenantId,
new_shard_count: u8,
new_stripe_size: Option<ShardStripeSize>,
) -> anyhow::Result<TenantShardSplitResponse> {
self.dispatch(
Method::PUT,
format!("control/v1/tenant/{tenant_id}/shard_split"),
Some(TenantShardSplitRequest {
new_shard_count,
new_stripe_size,
}),
)
.await
}
#[instrument(skip_all, fields(node_id=%req.node_id))] #[instrument(skip_all, fields(node_id=%req.node_id))]
pub async fn node_register(&self, req: NodeRegisterRequest) -> anyhow::Result<()> { pub async fn node_register(&self, req: NodeRegisterRequest) -> anyhow::Result<()> {
self.dispatch::<_, ()>(Method::POST, "control/v1/node".to_string(), Some(req)) self.dispatch::<_, ()>(Method::POST, "node".to_string(), Some(req))
.await .await
} }
@@ -527,15 +630,15 @@ impl StorageController {
pub async fn node_configure(&self, req: NodeConfigureRequest) -> anyhow::Result<()> { pub async fn node_configure(&self, req: NodeConfigureRequest) -> anyhow::Result<()> {
self.dispatch::<_, ()>( self.dispatch::<_, ()>(
Method::PUT, Method::PUT,
format!("control/v1/node/{}/config", req.node_id), format!("node/{}/config", req.node_id),
Some(req), Some(req),
) )
.await .await
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn ready(&self) -> anyhow::Result<()> { pub async fn status(&self) -> anyhow::Result<()> {
self.dispatch::<(), ()>(Method::GET, "ready".to_string(), None) self.dispatch::<(), ()>(Method::GET, "status".to_string(), None)
.await .await
} }

View File

@@ -72,6 +72,7 @@ where
let log_path = datadir.join(format!("{process_name}.log")); let log_path = datadir.join(format!("{process_name}.log"));
let process_log_file = fs::OpenOptions::new() let process_log_file = fs::OpenOptions::new()
.create(true) .create(true)
.write(true)
.append(true) .append(true)
.open(&log_path) .open(&log_path)
.with_context(|| { .with_context(|| {
@@ -255,9 +256,7 @@ fn fill_remote_storage_secrets_vars(mut cmd: &mut Command) -> &mut Command {
for env_key in [ for env_key in [
"AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY", "AWS_SECRET_ACCESS_KEY",
"AWS_PROFILE", "AWS_SESSION_TOKEN",
// HOME is needed in combination with `AWS_PROFILE` to pick up the SSO sessions.
"HOME",
"AZURE_STORAGE_ACCOUNT", "AZURE_STORAGE_ACCOUNT",
"AZURE_STORAGE_ACCESS_KEY", "AZURE_STORAGE_ACCESS_KEY",
] { ] {
@@ -294,7 +293,7 @@ where
// is in state 'taken' but the thread that would unlock it is // is in state 'taken' but the thread that would unlock it is
// not there. // not there.
// 2. A rust object that represented some external resource in the // 2. A rust object that represented some external resource in the
// parent now got implicitly copied by the fork, even though // parent now got implicitly copied by the the fork, even though
// the object's type is not `Copy`. The parent program may use // the object's type is not `Copy`. The parent program may use
// non-copyability as way to enforce unique ownership of an // non-copyability as way to enforce unique ownership of an
// external resource in the typesystem. The fork breaks that // external resource in the typesystem. The fork breaks that

View File

@@ -8,15 +8,14 @@
use anyhow::{anyhow, bail, Context, Result}; use anyhow::{anyhow, bail, Context, Result};
use clap::{value_parser, Arg, ArgAction, ArgMatches, Command, ValueEnum}; use clap::{value_parser, Arg, ArgAction, ArgMatches, Command, ValueEnum};
use compute_api::spec::ComputeMode; use compute_api::spec::ComputeMode;
use control_plane::attachment_service::{
AttachmentService, NodeAvailability, NodeConfigureRequest, NodeSchedulingPolicy,
};
use control_plane::endpoint::ComputeControlPlane; use control_plane::endpoint::ComputeControlPlane;
use control_plane::local_env::{InitForceMode, LocalEnv}; use control_plane::local_env::{InitForceMode, LocalEnv};
use control_plane::pageserver::{PageServerNode, PAGESERVER_REMOTE_STORAGE_DIR}; use control_plane::pageserver::{PageServerNode, PAGESERVER_REMOTE_STORAGE_DIR};
use control_plane::safekeeper::SafekeeperNode; use control_plane::safekeeper::SafekeeperNode;
use control_plane::storage_controller::StorageController;
use control_plane::{broker, local_env}; use control_plane::{broker, local_env};
use pageserver_api::controller_api::{
NodeAvailability, NodeConfigureRequest, NodeSchedulingPolicy, PlacementPolicy,
};
use pageserver_api::models::{ use pageserver_api::models::{
ShardParameters, TenantCreateRequest, TimelineCreateRequest, TimelineInfo, ShardParameters, TenantCreateRequest, TimelineCreateRequest, TimelineInfo,
}; };
@@ -52,7 +51,7 @@ project_git_version!(GIT_VERSION);
const DEFAULT_PG_VERSION: &str = "15"; const DEFAULT_PG_VERSION: &str = "15";
const DEFAULT_PAGESERVER_CONTROL_PLANE_API: &str = "http://127.0.0.1:1234/upcall/v1/"; const DEFAULT_PAGESERVER_CONTROL_PLANE_API: &str = "http://127.0.0.1:1234/";
fn default_conf(num_pageservers: u16) -> String { fn default_conf(num_pageservers: u16) -> String {
let mut template = format!( let mut template = format!(
@@ -138,7 +137,7 @@ fn main() -> Result<()> {
"start" => rt.block_on(handle_start_all(sub_args, &env)), "start" => rt.block_on(handle_start_all(sub_args, &env)),
"stop" => rt.block_on(handle_stop_all(sub_args, &env)), "stop" => rt.block_on(handle_stop_all(sub_args, &env)),
"pageserver" => rt.block_on(handle_pageserver(sub_args, &env)), "pageserver" => rt.block_on(handle_pageserver(sub_args, &env)),
"storage_controller" => rt.block_on(handle_storage_controller(sub_args, &env)), "attachment_service" => rt.block_on(handle_attachment_service(sub_args, &env)),
"safekeeper" => rt.block_on(handle_safekeeper(sub_args, &env)), "safekeeper" => rt.block_on(handle_safekeeper(sub_args, &env)),
"endpoint" => rt.block_on(handle_endpoint(sub_args, &env)), "endpoint" => rt.block_on(handle_endpoint(sub_args, &env)),
"mappings" => handle_mappings(sub_args, &mut env), "mappings" => handle_mappings(sub_args, &mut env),
@@ -435,33 +434,27 @@ async fn handle_tenant(
let shard_stripe_size: Option<u32> = let shard_stripe_size: Option<u32> =
create_match.get_one::<u32>("shard-stripe-size").cloned(); create_match.get_one::<u32>("shard-stripe-size").cloned();
let placement_policy = match create_match.get_one::<String>("placement-policy") {
Some(s) if !s.is_empty() => serde_json::from_str::<PlacementPolicy>(s)?,
_ => PlacementPolicy::Attached(0),
};
let tenant_conf = PageServerNode::parse_config(tenant_conf)?; let tenant_conf = PageServerNode::parse_config(tenant_conf)?;
// If tenant ID was not specified, generate one // If tenant ID was not specified, generate one
let tenant_id = parse_tenant_id(create_match)?.unwrap_or_else(TenantId::generate); let tenant_id = parse_tenant_id(create_match)?.unwrap_or_else(TenantId::generate);
// We must register the tenant with the storage controller, so // We must register the tenant with the attachment service, so
// that when the pageserver restarts, it will be re-attached. // that when the pageserver restarts, it will be re-attached.
let storage_controller = StorageController::from_env(env); let attachment_service = AttachmentService::from_env(env);
storage_controller attachment_service
.tenant_create(TenantCreateRequest { .tenant_create(TenantCreateRequest {
// Note that ::unsharded here isn't actually because the tenant is unsharded, it's because the // Note that ::unsharded here isn't actually because the tenant is unsharded, it's because the
// storage controller expects a shard-naive tenant_id in this attribute, and the TenantCreateRequest // attachment service expects a shard-naive tenant_id in this attribute, and the TenantCreateRequest
// type is used both in storage controller (for creating tenants) and in pageserver (for creating shards) // type is used both in attachment service (for creating tenants) and in pageserver (for creating shards)
new_tenant_id: TenantShardId::unsharded(tenant_id), new_tenant_id: TenantShardId::unsharded(tenant_id),
generation: None, generation: None,
shard_parameters: ShardParameters { shard_parameters: ShardParameters {
count: ShardCount::new(shard_count), count: ShardCount(shard_count),
stripe_size: shard_stripe_size stripe_size: shard_stripe_size
.map(ShardStripeSize) .map(ShardStripeSize)
.unwrap_or(ShardParameters::DEFAULT_STRIPE_SIZE), .unwrap_or(ShardParameters::DEFAULT_STRIPE_SIZE),
}, },
placement_policy: Some(placement_policy),
config: tenant_conf, config: tenant_conf,
}) })
.await?; .await?;
@@ -476,9 +469,9 @@ async fn handle_tenant(
.context("Failed to parse postgres version from the argument string")?; .context("Failed to parse postgres version from the argument string")?;
// FIXME: passing None for ancestor_start_lsn is not kosher in a sharded world: we can't have // FIXME: passing None for ancestor_start_lsn is not kosher in a sharded world: we can't have
// different shards picking different start lsns. Maybe we have to teach storage controller // different shards picking different start lsns. Maybe we have to teach attachment service
// to let shard 0 branch first and then propagate the chosen LSN to other shards. // to let shard 0 branch first and then propagate the chosen LSN to other shards.
storage_controller attachment_service
.tenant_timeline_create( .tenant_timeline_create(
tenant_id, tenant_id,
TimelineCreateRequest { TimelineCreateRequest {
@@ -523,7 +516,65 @@ async fn handle_tenant(
.with_context(|| format!("Tenant config failed for tenant with id {tenant_id}"))?; .with_context(|| format!("Tenant config failed for tenant with id {tenant_id}"))?;
println!("tenant {tenant_id} successfully configured on the pageserver"); println!("tenant {tenant_id} successfully configured on the pageserver");
} }
Some(("migrate", matches)) => {
let tenant_shard_id = get_tenant_shard_id(matches, env)?;
let new_pageserver = get_pageserver(env, matches)?;
let new_pageserver_id = new_pageserver.conf.id;
let attachment_service = AttachmentService::from_env(env);
attachment_service
.tenant_migrate(tenant_shard_id, new_pageserver_id)
.await?;
println!("tenant {tenant_shard_id} migrated to {}", new_pageserver_id);
}
Some(("status", matches)) => {
let tenant_id = get_tenant_id(matches, env)?;
let mut shard_table = comfy_table::Table::new();
shard_table.set_header(["Shard", "Pageserver", "Physical Size"]);
let mut tenant_synthetic_size = None;
let attachment_service = AttachmentService::from_env(env);
for shard in attachment_service.tenant_locate(tenant_id).await?.shards {
let pageserver =
PageServerNode::from_env(env, env.get_pageserver_conf(shard.node_id)?);
let size = pageserver
.http_client
.tenant_details(shard.shard_id)
.await?
.tenant_info
.current_physical_size
.unwrap();
shard_table.add_row([
format!("{}", shard.shard_id.shard_slug()),
format!("{}", shard.node_id.0),
format!("{} MiB", size / (1024 * 1024)),
]);
if shard.shard_id.is_zero() {
tenant_synthetic_size =
Some(pageserver.tenant_synthetic_size(shard.shard_id).await?);
}
}
let Some(synthetic_size) = tenant_synthetic_size else {
bail!("Shard 0 not found")
};
let mut tenant_table = comfy_table::Table::new();
tenant_table.add_row(["Tenant ID".to_string(), tenant_id.to_string()]);
tenant_table.add_row([
"Synthetic size".to_string(),
format!("{} MiB", synthetic_size.size.unwrap_or(0) / (1024 * 1024)),
]);
println!("{tenant_table}");
println!("{shard_table}");
}
Some((sub_name, _)) => bail!("Unexpected tenant subcommand '{}'", sub_name), Some((sub_name, _)) => bail!("Unexpected tenant subcommand '{}'", sub_name),
None => bail!("no tenant subcommand provided"), None => bail!("no tenant subcommand provided"),
} }
@@ -535,7 +586,7 @@ async fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::Local
match timeline_match.subcommand() { match timeline_match.subcommand() {
Some(("list", list_match)) => { Some(("list", list_match)) => {
// TODO(sharding): this command shouldn't have to specify a shard ID: we should ask the storage controller // TODO(sharding): this command shouldn't have to specify a shard ID: we should ask the attachment service
// where shard 0 is attached, and query there. // where shard 0 is attached, and query there.
let tenant_shard_id = get_tenant_shard_id(list_match, env)?; let tenant_shard_id = get_tenant_shard_id(list_match, env)?;
let timelines = pageserver.timeline_list(&tenant_shard_id).await?; let timelines = pageserver.timeline_list(&tenant_shard_id).await?;
@@ -555,7 +606,7 @@ async fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::Local
let new_timeline_id_opt = parse_timeline_id(create_match)?; let new_timeline_id_opt = parse_timeline_id(create_match)?;
let new_timeline_id = new_timeline_id_opt.unwrap_or(TimelineId::generate()); let new_timeline_id = new_timeline_id_opt.unwrap_or(TimelineId::generate());
let storage_controller = StorageController::from_env(env); let attachment_service = AttachmentService::from_env(env);
let create_req = TimelineCreateRequest { let create_req = TimelineCreateRequest {
new_timeline_id, new_timeline_id,
ancestor_timeline_id: None, ancestor_timeline_id: None,
@@ -563,7 +614,7 @@ async fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::Local
ancestor_start_lsn: None, ancestor_start_lsn: None,
pg_version: Some(pg_version), pg_version: Some(pg_version),
}; };
let timeline_info = storage_controller let timeline_info = attachment_service
.tenant_timeline_create(tenant_id, create_req) .tenant_timeline_create(tenant_id, create_req)
.await?; .await?;
@@ -581,10 +632,6 @@ async fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::Local
let name = import_match let name = import_match
.get_one::<String>("node-name") .get_one::<String>("node-name")
.ok_or_else(|| anyhow!("No node name provided"))?; .ok_or_else(|| anyhow!("No node name provided"))?;
let update_catalog = import_match
.get_one::<bool>("update-catalog")
.cloned()
.unwrap_or_default();
// Parse base inputs // Parse base inputs
let base_tarfile = import_match let base_tarfile = import_match
@@ -627,7 +674,6 @@ async fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::Local
None, None,
pg_version, pg_version,
ComputeMode::Primary, ComputeMode::Primary,
!update_catalog,
)?; )?;
println!("Done"); println!("Done");
} }
@@ -652,7 +698,7 @@ async fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::Local
.transpose() .transpose()
.context("Failed to parse ancestor start Lsn from the request")?; .context("Failed to parse ancestor start Lsn from the request")?;
let new_timeline_id = TimelineId::generate(); let new_timeline_id = TimelineId::generate();
let storage_controller = StorageController::from_env(env); let attachment_service = AttachmentService::from_env(env);
let create_req = TimelineCreateRequest { let create_req = TimelineCreateRequest {
new_timeline_id, new_timeline_id,
ancestor_timeline_id: Some(ancestor_timeline_id), ancestor_timeline_id: Some(ancestor_timeline_id),
@@ -660,7 +706,7 @@ async fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::Local
ancestor_start_lsn: start_lsn, ancestor_start_lsn: start_lsn,
pg_version: None, pg_version: None,
}; };
let timeline_info = storage_controller let timeline_info = attachment_service
.tenant_timeline_create(tenant_id, create_req) .tenant_timeline_create(tenant_id, create_req)
.await?; .await?;
@@ -689,7 +735,7 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
match sub_name { match sub_name {
"list" => { "list" => {
// TODO(sharding): this command shouldn't have to specify a shard ID: we should ask the storage controller // TODO(sharding): this command shouldn't have to specify a shard ID: we should ask the attachment service
// where shard 0 is attached, and query there. // where shard 0 is attached, and query there.
let tenant_shard_id = get_tenant_shard_id(sub_args, env)?; let tenant_shard_id = get_tenant_shard_id(sub_args, env)?;
let timeline_infos = get_timeline_infos(env, &tenant_shard_id) let timeline_infos = get_timeline_infos(env, &tenant_shard_id)
@@ -749,7 +795,7 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
&endpoint.timeline_id.to_string(), &endpoint.timeline_id.to_string(),
branch_name, branch_name,
lsn_str.as_str(), lsn_str.as_str(),
&format!("{}", endpoint.status()), endpoint.status(),
]); ]);
} }
@@ -765,10 +811,6 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
.get_one::<String>("endpoint_id") .get_one::<String>("endpoint_id")
.map(String::to_string) .map(String::to_string)
.unwrap_or_else(|| format!("ep-{branch_name}")); .unwrap_or_else(|| format!("ep-{branch_name}"));
let update_catalog = sub_args
.get_one::<bool>("update-catalog")
.cloned()
.unwrap_or_default();
let lsn = sub_args let lsn = sub_args
.get_one::<String>("lsn") .get_one::<String>("lsn")
@@ -818,7 +860,6 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
http_port, http_port,
pg_version, pg_version,
mode, mode,
!update_catalog,
)?; )?;
} }
"start" => { "start" => {
@@ -857,11 +898,6 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
.get(endpoint_id.as_str()) .get(endpoint_id.as_str())
.ok_or_else(|| anyhow::anyhow!("endpoint {endpoint_id} not found"))?; .ok_or_else(|| anyhow::anyhow!("endpoint {endpoint_id} not found"))?;
let create_test_user = sub_args
.get_one::<bool>("create-test-user")
.cloned()
.unwrap_or_default();
cplane.check_conflicting_endpoints( cplane.check_conflicting_endpoints(
endpoint.mode, endpoint.mode,
endpoint.tenant_id, endpoint.tenant_id,
@@ -874,21 +910,21 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
( (
vec![(parsed.0, parsed.1.unwrap_or(5432))], vec![(parsed.0, parsed.1.unwrap_or(5432))],
// If caller is telling us what pageserver to use, this is not a tenant which is // If caller is telling us what pageserver to use, this is not a tenant which is
// fully managed by storage controller, therefore not sharded. // fully managed by attachment service, therefore not sharded.
ShardParameters::DEFAULT_STRIPE_SIZE, ShardParameters::DEFAULT_STRIPE_SIZE,
) )
} else { } else {
// Look up the currently attached location of the tenant, and its striping metadata, // Look up the currently attached location of the tenant, and its striping metadata,
// to pass these on to postgres. // to pass these on to postgres.
let storage_controller = StorageController::from_env(env); let attachment_service = AttachmentService::from_env(env);
let locate_result = storage_controller.tenant_locate(endpoint.tenant_id).await?; let locate_result = attachment_service.tenant_locate(endpoint.tenant_id).await?;
let pageservers = locate_result let pageservers = locate_result
.shards .shards
.into_iter() .into_iter()
.map(|shard| { .map(|shard| {
( (
Host::parse(&shard.listen_pg_addr) Host::parse(&shard.listen_pg_addr)
.expect("Storage controller reported bad hostname"), .expect("Attachment service reported bad hostname"),
shard.listen_pg_port, shard.listen_pg_port,
) )
}) })
@@ -916,7 +952,6 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
pageservers, pageservers,
remote_ext_config, remote_ext_config,
stripe_size.0 as usize, stripe_size.0 as usize,
create_test_user,
) )
.await?; .await?;
} }
@@ -937,8 +972,8 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
pageserver.pg_connection_config.port(), pageserver.pg_connection_config.port(),
)] )]
} else { } else {
let storage_controller = StorageController::from_env(env); let attachment_service = AttachmentService::from_env(env);
storage_controller attachment_service
.tenant_locate(endpoint.tenant_id) .tenant_locate(endpoint.tenant_id)
.await? .await?
.shards .shards
@@ -946,26 +981,25 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
.map(|shard| { .map(|shard| {
( (
Host::parse(&shard.listen_pg_addr) Host::parse(&shard.listen_pg_addr)
.expect("Storage controller reported malformed host"), .expect("Attachment service reported malformed host"),
shard.listen_pg_port, shard.listen_pg_port,
) )
}) })
.collect::<Vec<_>>() .collect::<Vec<_>>()
}; };
endpoint.reconfigure(pageservers, None).await?; endpoint.reconfigure(pageservers).await?;
} }
"stop" => { "stop" => {
let endpoint_id = sub_args let endpoint_id = sub_args
.get_one::<String>("endpoint_id") .get_one::<String>("endpoint_id")
.ok_or_else(|| anyhow!("No endpoint ID was provided to stop"))?; .ok_or_else(|| anyhow!("No endpoint ID was provided to stop"))?;
let destroy = sub_args.get_flag("destroy"); let destroy = sub_args.get_flag("destroy");
let mode = sub_args.get_one::<String>("mode").expect("has a default");
let endpoint = cplane let endpoint = cplane
.endpoints .endpoints
.get(endpoint_id.as_str()) .get(endpoint_id.as_str())
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?; .with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
endpoint.stop(mode, destroy)?; endpoint.stop(destroy)?;
} }
_ => bail!("Unexpected endpoint subcommand '{sub_name}'"), _ => bail!("Unexpected endpoint subcommand '{sub_name}'"),
@@ -1022,8 +1056,9 @@ fn get_pageserver(env: &local_env::LocalEnv, args: &ArgMatches) -> Result<PageSe
async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> { async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
match sub_match.subcommand() { match sub_match.subcommand() {
Some(("start", subcommand_args)) => { Some(("start", subcommand_args)) => {
let register = subcommand_args.get_one::<bool>("register").unwrap_or(&true);
if let Err(e) = get_pageserver(env, subcommand_args)? if let Err(e) = get_pageserver(env, subcommand_args)?
.start(&pageserver_config_overrides(subcommand_args)) .start(&pageserver_config_overrides(subcommand_args), *register)
.await .await
{ {
eprintln!("pageserver start failed: {e}"); eprintln!("pageserver start failed: {e}");
@@ -1052,7 +1087,7 @@ async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) ->
} }
if let Err(e) = pageserver if let Err(e) = pageserver
.start(&pageserver_config_overrides(subcommand_args)) .start(&pageserver_config_overrides(subcommand_args), false)
.await .await
{ {
eprintln!("pageserver start failed: {e}"); eprintln!("pageserver start failed: {e}");
@@ -1065,8 +1100,8 @@ async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) ->
let scheduling = subcommand_args.get_one("scheduling"); let scheduling = subcommand_args.get_one("scheduling");
let availability = subcommand_args.get_one("availability"); let availability = subcommand_args.get_one("availability");
let storage_controller = StorageController::from_env(env); let attachment_service = AttachmentService::from_env(env);
storage_controller attachment_service
.node_configure(NodeConfigureRequest { .node_configure(NodeConfigureRequest {
node_id: pageserver.conf.id, node_id: pageserver.conf.id,
scheduling: scheduling.cloned(), scheduling: scheduling.cloned(),
@@ -1091,11 +1126,11 @@ async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) ->
Ok(()) Ok(())
} }
async fn handle_storage_controller( async fn handle_attachment_service(
sub_match: &ArgMatches, sub_match: &ArgMatches,
env: &local_env::LocalEnv, env: &local_env::LocalEnv,
) -> Result<()> { ) -> Result<()> {
let svc = StorageController::from_env(env); let svc = AttachmentService::from_env(env);
match sub_match.subcommand() { match sub_match.subcommand() {
Some(("start", _start_match)) => { Some(("start", _start_match)) => {
if let Err(e) = svc.start().await { if let Err(e) = svc.start().await {
@@ -1115,8 +1150,8 @@ async fn handle_storage_controller(
exit(1); exit(1);
} }
} }
Some((sub_name, _)) => bail!("Unexpected storage_controller subcommand '{}'", sub_name), Some((sub_name, _)) => bail!("Unexpected attachment_service subcommand '{}'", sub_name),
None => bail!("no storage_controller subcommand provided"), None => bail!("no attachment_service subcommand provided"),
} }
Ok(()) Ok(())
} }
@@ -1201,11 +1236,11 @@ async fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) ->
broker::start_broker_process(env).await?; broker::start_broker_process(env).await?;
// Only start the storage controller if the pageserver is configured to need it // Only start the attachment service if the pageserver is configured to need it
if env.control_plane_api.is_some() { if env.control_plane_api.is_some() {
let storage_controller = StorageController::from_env(env); let attachment_service = AttachmentService::from_env(env);
if let Err(e) = storage_controller.start().await { if let Err(e) = attachment_service.start().await {
eprintln!("storage_controller start failed: {:#}", e); eprintln!("attachment_service start failed: {:#}", e);
try_stop_all(env, true).await; try_stop_all(env, true).await;
exit(1); exit(1);
} }
@@ -1214,7 +1249,7 @@ async fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) ->
for ps_conf in &env.pageservers { for ps_conf in &env.pageservers {
let pageserver = PageServerNode::from_env(env, ps_conf); let pageserver = PageServerNode::from_env(env, ps_conf);
if let Err(e) = pageserver if let Err(e) = pageserver
.start(&pageserver_config_overrides(sub_match)) .start(&pageserver_config_overrides(sub_match), true)
.await .await
{ {
eprintln!("pageserver {} start failed: {:#}", ps_conf.id, e); eprintln!("pageserver {} start failed: {:#}", ps_conf.id, e);
@@ -1248,7 +1283,7 @@ async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) {
match ComputeControlPlane::load(env.clone()) { match ComputeControlPlane::load(env.clone()) {
Ok(cplane) => { Ok(cplane) => {
for (_k, node) in cplane.endpoints { for (_k, node) in cplane.endpoints {
if let Err(e) = node.stop(if immediate { "immediate" } else { "fast " }, false) { if let Err(e) = node.stop(false) {
eprintln!("postgres stop failed: {e:#}"); eprintln!("postgres stop failed: {e:#}");
} }
} }
@@ -1277,9 +1312,9 @@ async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) {
} }
if env.control_plane_api.is_some() { if env.control_plane_api.is_some() {
let storage_controller = StorageController::from_env(env); let attachment_service = AttachmentService::from_env(env);
if let Err(e) = storage_controller.stop(immediate).await { if let Err(e) = attachment_service.stop(immediate).await {
eprintln!("storage controller stop failed: {e:#}"); eprintln!("attachment service stop failed: {e:#}");
} }
} }
} }
@@ -1401,18 +1436,6 @@ fn cli() -> Command {
.required(false) .required(false)
.default_value("1"); .default_value("1");
let update_catalog = Arg::new("update-catalog")
.value_parser(value_parser!(bool))
.long("update-catalog")
.help("If set, will set up the catalog for neon_superuser")
.required(false);
let create_test_user = Arg::new("create-test-user")
.value_parser(value_parser!(bool))
.long("create-test-user")
.help("If set, will create test user `user` and `neondb` database. Requires `update-catalog = true`")
.required(false);
Command::new("Neon CLI") Command::new("Neon CLI")
.arg_required_else_help(true) .arg_required_else_help(true)
.version(GIT_VERSION) .version(GIT_VERSION)
@@ -1473,7 +1496,6 @@ fn cli() -> Command {
.arg(Arg::new("end-lsn").long("end-lsn") .arg(Arg::new("end-lsn").long("end-lsn")
.help("Lsn the basebackup ends at")) .help("Lsn the basebackup ends at"))
.arg(pg_version_arg.clone()) .arg(pg_version_arg.clone())
.arg(update_catalog.clone())
) )
).subcommand( ).subcommand(
Command::new("tenant") Command::new("tenant")
@@ -1489,13 +1511,19 @@ fn cli() -> Command {
.help("Use this tenant in future CLI commands where tenant_id is needed, but not specified")) .help("Use this tenant in future CLI commands where tenant_id is needed, but not specified"))
.arg(Arg::new("shard-count").value_parser(value_parser!(u8)).long("shard-count").action(ArgAction::Set).help("Number of shards in the new tenant (default 1)")) .arg(Arg::new("shard-count").value_parser(value_parser!(u8)).long("shard-count").action(ArgAction::Set).help("Number of shards in the new tenant (default 1)"))
.arg(Arg::new("shard-stripe-size").value_parser(value_parser!(u32)).long("shard-stripe-size").action(ArgAction::Set).help("Sharding stripe size in pages")) .arg(Arg::new("shard-stripe-size").value_parser(value_parser!(u32)).long("shard-stripe-size").action(ArgAction::Set).help("Sharding stripe size in pages"))
.arg(Arg::new("placement-policy").value_parser(value_parser!(String)).long("placement-policy").action(ArgAction::Set).help("Placement policy shards in this tenant"))
) )
.subcommand(Command::new("set-default").arg(tenant_id_arg.clone().required(true)) .subcommand(Command::new("set-default").arg(tenant_id_arg.clone().required(true))
.about("Set a particular tenant as default in future CLI commands where tenant_id is needed, but not specified")) .about("Set a particular tenant as default in future CLI commands where tenant_id is needed, but not specified"))
.subcommand(Command::new("config") .subcommand(Command::new("config")
.arg(tenant_id_arg.clone()) .arg(tenant_id_arg.clone())
.arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false))) .arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false)))
.subcommand(Command::new("migrate")
.about("Migrate a tenant from one pageserver to another")
.arg(tenant_id_arg.clone())
.arg(pageserver_id_arg.clone()))
.subcommand(Command::new("status")
.about("Human readable summary of the tenant's shards and attachment locations")
.arg(tenant_id_arg.clone()))
) )
.subcommand( .subcommand(
Command::new("pageserver") Command::new("pageserver")
@@ -1505,7 +1533,11 @@ fn cli() -> Command {
.subcommand(Command::new("status")) .subcommand(Command::new("status"))
.subcommand(Command::new("start") .subcommand(Command::new("start")
.about("Start local pageserver") .about("Start local pageserver")
.arg(pageserver_config_args.clone()) .arg(pageserver_config_args.clone()).arg(Arg::new("register")
.long("register")
.default_value("true").required(false)
.value_parser(value_parser!(bool))
.value_name("register"))
) )
.subcommand(Command::new("stop") .subcommand(Command::new("stop")
.about("Stop local pageserver") .about("Stop local pageserver")
@@ -1523,9 +1555,9 @@ fn cli() -> Command {
) )
) )
.subcommand( .subcommand(
Command::new("storage_controller") Command::new("attachment_service")
.arg_required_else_help(true) .arg_required_else_help(true)
.about("Manage storage_controller") .about("Manage attachment_service")
.subcommand(Command::new("start").about("Start local pageserver").arg(pageserver_config_args.clone())) .subcommand(Command::new("start").about("Start local pageserver").arg(pageserver_config_args.clone()))
.subcommand(Command::new("stop").about("Stop local pageserver") .subcommand(Command::new("stop").about("Stop local pageserver")
.arg(stop_mode_arg.clone())) .arg(stop_mode_arg.clone()))
@@ -1572,7 +1604,6 @@ fn cli() -> Command {
.required(false)) .required(false))
.arg(pg_version_arg.clone()) .arg(pg_version_arg.clone())
.arg(hot_standby_arg.clone()) .arg(hot_standby_arg.clone())
.arg(update_catalog)
) )
.subcommand(Command::new("start") .subcommand(Command::new("start")
.about("Start postgres.\n If the endpoint doesn't exist yet, it is created.") .about("Start postgres.\n If the endpoint doesn't exist yet, it is created.")
@@ -1580,7 +1611,6 @@ fn cli() -> Command {
.arg(endpoint_pageserver_id_arg.clone()) .arg(endpoint_pageserver_id_arg.clone())
.arg(safekeepers_arg) .arg(safekeepers_arg)
.arg(remote_ext_config_args) .arg(remote_ext_config_args)
.arg(create_test_user)
) )
.subcommand(Command::new("reconfigure") .subcommand(Command::new("reconfigure")
.about("Reconfigure the endpoint") .about("Reconfigure the endpoint")
@@ -1597,16 +1627,7 @@ fn cli() -> Command {
.long("destroy") .long("destroy")
.action(ArgAction::SetTrue) .action(ArgAction::SetTrue)
.required(false) .required(false)
) )
.arg(
Arg::new("mode")
.help("Postgres shutdown mode, passed to \"pg_ctl -m <mode>\"")
.long("mode")
.action(ArgAction::Set)
.required(false)
.value_parser(["smart", "fast", "immediate"])
.default_value("fast")
)
) )
) )

View File

@@ -12,7 +12,7 @@
//! //!
//! The endpoint is managed by the `compute_ctl` binary. When an endpoint is //! The endpoint is managed by the `compute_ctl` binary. When an endpoint is
//! started, we launch `compute_ctl`. It synchronizes the safekeepers, downloads //! started, we launch `compute_ctl`. It synchronizes the safekeepers, downloads
//! the basebackup from the pageserver to initialize the data directory, and //! the basebackup from the pageserver to initialize the the data directory, and
//! finally launches the PostgreSQL process. It watches the PostgreSQL process //! finally launches the PostgreSQL process. It watches the PostgreSQL process
//! until it exits. //! until it exits.
//! //!
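
The flags used for that launch appear further down in this file's diff (`--http-port`, `--pgdata`, `--connstr`, `--spec-path`). A minimal sketch of the invocation, with placeholder paths and ports (the binary is actually resolved from `neon_distrib_dir` in the real code), might look like this:

```rust
use std::process::{Child, Command};

// Hedged sketch of the compute_ctl launch described above; the flag names come
// from the code later in this diff, while the concrete paths, ports and the
// bare "compute_ctl" program name are placeholders.
fn launch_compute_ctl() -> std::io::Result<Child> {
    Command::new("compute_ctl")
        .args(["--http-port", "55434"])
        .args(["--pgdata", "/tmp/endpoints/ep-main/pgdata"])
        .args(["--connstr", "postgresql://cloud_admin@127.0.0.1:55432/postgres"])
        .args(["--spec-path", "/tmp/endpoints/ep-main/spec.json"])
        .spawn()
}
```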
@@ -41,25 +41,20 @@ use std::net::SocketAddr;
use std::net::TcpStream; use std::net::TcpStream;
use std::path::PathBuf; use std::path::PathBuf;
use std::process::Command; use std::process::Command;
use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use anyhow::{anyhow, bail, Context, Result}; use anyhow::{anyhow, bail, Context, Result};
use compute_api::spec::Database;
use compute_api::spec::PgIdent;
use compute_api::spec::RemoteExtSpec; use compute_api::spec::RemoteExtSpec;
use compute_api::spec::Role;
use nix::sys::signal::kill; use nix::sys::signal::kill;
use nix::sys::signal::Signal; use nix::sys::signal::Signal;
use pageserver_api::shard::ShardStripeSize;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use url::Host; use url::Host;
use utils::id::{NodeId, TenantId, TimelineId}; use utils::id::{NodeId, TenantId, TimelineId};
use crate::attachment_service::AttachmentService;
use crate::local_env::LocalEnv; use crate::local_env::LocalEnv;
use crate::postgresql_conf::PostgresConf; use crate::postgresql_conf::PostgresConf;
use crate::storage_controller::StorageController;
use compute_api::responses::{ComputeState, ComputeStatus}; use compute_api::responses::{ComputeState, ComputeStatus};
use compute_api::spec::{Cluster, ComputeFeature, ComputeMode, ComputeSpec}; use compute_api::spec::{Cluster, ComputeFeature, ComputeMode, ComputeSpec};
@@ -127,7 +122,6 @@ impl ComputeControlPlane {
http_port: Option<u16>, http_port: Option<u16>,
pg_version: u32, pg_version: u32,
mode: ComputeMode, mode: ComputeMode,
skip_pg_catalog_updates: bool,
) -> Result<Arc<Endpoint>> { ) -> Result<Arc<Endpoint>> {
let pg_port = pg_port.unwrap_or_else(|| self.get_port()); let pg_port = pg_port.unwrap_or_else(|| self.get_port());
let http_port = http_port.unwrap_or_else(|| self.get_port() + 1); let http_port = http_port.unwrap_or_else(|| self.get_port() + 1);
@@ -146,7 +140,7 @@ impl ComputeControlPlane {
// before and after start are the same. So, skip catalog updates, // before and after start are the same. So, skip catalog updates,
// with this we basically test a case of waking up an idle compute, where // with this we basically test a case of waking up an idle compute, where
// we also skip catalog updates in the cloud. // we also skip catalog updates in the cloud.
skip_pg_catalog_updates, skip_pg_catalog_updates: true,
features: vec![], features: vec![],
}); });
@@ -161,7 +155,7 @@ impl ComputeControlPlane {
http_port, http_port,
pg_port, pg_port,
pg_version, pg_version,
skip_pg_catalog_updates, skip_pg_catalog_updates: true,
features: vec![], features: vec![],
})?, })?,
)?; )?;
@@ -190,7 +184,7 @@ impl ComputeControlPlane {
v.tenant_id == tenant_id v.tenant_id == tenant_id
&& v.timeline_id == timeline_id && v.timeline_id == timeline_id
&& v.mode == mode && v.mode == mode
&& v.status() != EndpointStatus::Stopped && v.status() != "stopped"
}); });
if let Some((key, _)) = duplicates.next() { if let Some((key, _)) = duplicates.next() {
@@ -229,26 +223,6 @@ pub struct Endpoint {
features: Vec<ComputeFeature>, features: Vec<ComputeFeature>,
} }
#[derive(PartialEq, Eq)]
pub enum EndpointStatus {
Running,
Stopped,
Crashed,
RunningNoPidfile,
}
impl std::fmt::Display for EndpointStatus {
fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result {
let s = match self {
Self::Running => "running",
Self::Stopped => "stopped",
Self::Crashed => "crashed",
Self::RunningNoPidfile => "running, no pidfile",
};
write!(writer, "{}", s)
}
}
impl Endpoint { impl Endpoint {
fn from_dir_entry(entry: std::fs::DirEntry, env: &LocalEnv) -> Result<Endpoint> { fn from_dir_entry(entry: std::fs::DirEntry, env: &LocalEnv) -> Result<Endpoint> {
if !entry.file_type()?.is_dir() { if !entry.file_type()?.is_dir() {
@@ -406,16 +380,16 @@ impl Endpoint {
self.endpoint_path().join("pgdata") self.endpoint_path().join("pgdata")
} }
pub fn status(&self) -> EndpointStatus { pub fn status(&self) -> &str {
let timeout = Duration::from_millis(300); let timeout = Duration::from_millis(300);
let has_pidfile = self.pgdata().join("postmaster.pid").exists(); let has_pidfile = self.pgdata().join("postmaster.pid").exists();
let can_connect = TcpStream::connect_timeout(&self.pg_address, timeout).is_ok(); let can_connect = TcpStream::connect_timeout(&self.pg_address, timeout).is_ok();
match (has_pidfile, can_connect) { match (has_pidfile, can_connect) {
(true, true) => EndpointStatus::Running, (true, true) => "running",
(false, false) => EndpointStatus::Stopped, (false, false) => "stopped",
(true, false) => EndpointStatus::Crashed, (true, false) => "crashed",
(false, true) => EndpointStatus::RunningNoPidfile, (false, true) => "running, no pidfile",
} }
} }
@@ -506,9 +480,8 @@ impl Endpoint {
pageservers: Vec<(Host, u16)>, pageservers: Vec<(Host, u16)>,
remote_ext_config: Option<&String>, remote_ext_config: Option<&String>,
shard_stripe_size: usize, shard_stripe_size: usize,
create_test_user: bool,
) -> Result<()> { ) -> Result<()> {
if self.status() == EndpointStatus::Running { if self.status() == "running" {
anyhow::bail!("The endpoint is already running"); anyhow::bail!("The endpoint is already running");
} }
@@ -558,26 +531,8 @@ impl Endpoint {
cluster_id: None, // project ID: not used cluster_id: None, // project ID: not used
name: None, // project name: not used name: None, // project name: not used
state: None, state: None,
roles: if create_test_user { roles: vec![],
vec![Role { databases: vec![],
name: PgIdent::from_str("test").unwrap(),
encrypted_password: None,
options: None,
}]
} else {
Vec::new()
},
databases: if create_test_user {
vec![Database {
name: PgIdent::from_str("neondb").unwrap(),
owner: PgIdent::from_str("test").unwrap(),
options: None,
restrict_conn: false,
invalid: false,
}]
} else {
Vec::new()
},
settings: None, settings: None,
postgresql_conf: Some(postgresql_conf), postgresql_conf: Some(postgresql_conf),
}, },
@@ -591,7 +546,6 @@ impl Endpoint {
remote_extensions, remote_extensions,
pgbouncer_settings: None, pgbouncer_settings: None,
shard_stripe_size: Some(shard_stripe_size), shard_stripe_size: Some(shard_stripe_size),
primary_is_running: None,
}; };
let spec_path = self.endpoint_path().join("spec.json"); let spec_path = self.endpoint_path().join("spec.json");
std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?; std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?;
@@ -603,16 +557,11 @@ impl Endpoint {
.open(self.endpoint_path().join("compute.log"))?; .open(self.endpoint_path().join("compute.log"))?;
// Launch compute_ctl // Launch compute_ctl
let conn_str = self.connstr("cloud_admin", "postgres"); println!("Starting postgres node at '{}'", self.connstr());
println!("Starting postgres node at '{}'", conn_str);
if create_test_user {
let conn_str = self.connstr("test", "neondb");
println!("Also at '{}'", conn_str);
}
let mut cmd = Command::new(self.env.neon_distrib_dir.join("compute_ctl")); let mut cmd = Command::new(self.env.neon_distrib_dir.join("compute_ctl"));
cmd.args(["--http-port", &self.http_address.port().to_string()]) cmd.args(["--http-port", &self.http_address.port().to_string()])
.args(["--pgdata", self.pgdata().to_str().unwrap()]) .args(["--pgdata", self.pgdata().to_str().unwrap()])
.args(["--connstr", &conn_str]) .args(["--connstr", &self.connstr()])
.args([ .args([
"--spec-path", "--spec-path",
self.endpoint_path().join("spec.json").to_str().unwrap(), self.endpoint_path().join("spec.json").to_str().unwrap(),
@@ -656,7 +605,7 @@ impl Endpoint {
// Wait for it to start // Wait for it to start
let mut attempt = 0; let mut attempt = 0;
const ATTEMPT_INTERVAL: Duration = Duration::from_millis(100); const ATTEMPT_INTERVAL: Duration = Duration::from_millis(100);
const MAX_ATTEMPTS: u32 = 10 * 90; // Wait up to 1.5 min const MAX_ATTEMPTS: u32 = 10 * 30; // Wait up to 30 s
loop { loop {
attempt += 1; attempt += 1;
match self.get_status().await { match self.get_status().await {
@@ -683,9 +632,7 @@ impl Endpoint {
} }
ComputeStatus::Empty ComputeStatus::Empty
| ComputeStatus::ConfigurationPending | ComputeStatus::ConfigurationPending
| ComputeStatus::Configuration | ComputeStatus::Configuration => {
| ComputeStatus::TerminationPending
| ComputeStatus::Terminated => {
bail!("unexpected compute status: {:?}", state.status) bail!("unexpected compute status: {:?}", state.status)
} }
} }
@@ -736,11 +683,7 @@ impl Endpoint {
} }
} }
pub async fn reconfigure( pub async fn reconfigure(&self, mut pageservers: Vec<(Host, u16)>) -> Result<()> {
&self,
mut pageservers: Vec<(Host, u16)>,
stripe_size: Option<ShardStripeSize>,
) -> Result<()> {
let mut spec: ComputeSpec = { let mut spec: ComputeSpec = {
let spec_path = self.endpoint_path().join("spec.json"); let spec_path = self.endpoint_path().join("spec.json");
let file = std::fs::File::open(spec_path)?; let file = std::fs::File::open(spec_path)?;
@@ -750,17 +693,17 @@ impl Endpoint {
let postgresql_conf = self.read_postgresql_conf()?; let postgresql_conf = self.read_postgresql_conf()?;
spec.cluster.postgresql_conf = Some(postgresql_conf); spec.cluster.postgresql_conf = Some(postgresql_conf);
// If we weren't given explicit pageservers, query the storage controller // If we weren't given explicit pageservers, query the attachment service
if pageservers.is_empty() { if pageservers.is_empty() {
let storage_controller = StorageController::from_env(&self.env); let attachment_service = AttachmentService::from_env(&self.env);
let locate_result = storage_controller.tenant_locate(self.tenant_id).await?; let locate_result = attachment_service.tenant_locate(self.tenant_id).await?;
pageservers = locate_result pageservers = locate_result
.shards .shards
.into_iter() .into_iter()
.map(|shard| { .map(|shard| {
( (
Host::parse(&shard.listen_pg_addr) Host::parse(&shard.listen_pg_addr)
.expect("Storage controller reported bad hostname"), .expect("Attachment service reported bad hostname"),
shard.listen_pg_port, shard.listen_pg_port,
) )
}) })
@@ -770,14 +713,8 @@ impl Endpoint {
let pageserver_connstr = Self::build_pageserver_connstr(&pageservers); let pageserver_connstr = Self::build_pageserver_connstr(&pageservers);
assert!(!pageserver_connstr.is_empty()); assert!(!pageserver_connstr.is_empty());
spec.pageserver_connstring = Some(pageserver_connstr); spec.pageserver_connstring = Some(pageserver_connstr);
if stripe_size.is_some() {
spec.shard_stripe_size = stripe_size.map(|s| s.0 as usize);
}
let client = reqwest::Client::builder() let client = reqwest::Client::new();
.timeout(Duration::from_secs(30))
.build()
.unwrap();
let response = client let response = client
.post(format!( .post(format!(
"http://{}:{}/configure", "http://{}:{}/configure",
@@ -804,8 +741,22 @@ impl Endpoint {
} }
} }
pub fn stop(&self, mode: &str, destroy: bool) -> Result<()> { pub fn stop(&self, destroy: bool) -> Result<()> {
self.pg_ctl(&["-m", mode, "stop"], &None)?; // If we are going to destroy data directory,
// use immediate shutdown mode, otherwise,
// shutdown gracefully to leave the data directory sane.
//
// Postgres is always started from scratch, so stop
// without destroy only used for testing and debugging.
//
self.pg_ctl(
if destroy {
&["-m", "immediate", "stop"]
} else {
&["stop"]
},
&None,
)?;
// Also wait for the compute_ctl process to die. It might have some // Also wait for the compute_ctl process to die. It might have some
// cleanup work to do after postgres stops, like syncing safekeepers, // cleanup work to do after postgres stops, like syncing safekeepers,
@@ -826,13 +777,13 @@ impl Endpoint {
Ok(()) Ok(())
} }
pub fn connstr(&self, user: &str, db_name: &str) -> String { pub fn connstr(&self) -> String {
format!( format!(
"postgresql://{}@{}:{}/{}", "postgresql://{}@{}:{}/{}",
user, "cloud_admin",
self.pg_address.ip(), self.pg_address.ip(),
self.pg_address.port(), self.pg_address.port(),
db_name "postgres"
) )
} }
} }

View File

@@ -6,6 +6,7 @@
//! local installations. //! local installations.
#![deny(clippy::undocumented_unsafe_blocks)] #![deny(clippy::undocumented_unsafe_blocks)]
pub mod attachment_service;
mod background_process; mod background_process;
pub mod broker; pub mod broker;
pub mod endpoint; pub mod endpoint;
@@ -13,4 +14,3 @@ pub mod local_env;
pub mod pageserver; pub mod pageserver;
pub mod postgresql_conf; pub mod postgresql_conf;
pub mod safekeeper; pub mod safekeeper;
pub mod storage_controller;

View File

@@ -72,16 +72,11 @@ pub struct LocalEnv {
#[serde(default)] #[serde(default)]
pub safekeepers: Vec<SafekeeperConf>, pub safekeepers: Vec<SafekeeperConf>,
// Control plane upcall API for pageserver: if None, we will not run storage_controller. If set, this will // Control plane location: if None, we will not run attachment_service. If set, this will
// be propagated into each pageserver's configuration. // be propagated into each pageserver's configuration.
#[serde(default)] #[serde(default)]
pub control_plane_api: Option<Url>, pub control_plane_api: Option<Url>,
// Control plane upcall API for storage controller. If set, this will be propagated into the
// storage controller's configuration.
#[serde(default)]
pub control_plane_compute_hook_api: Option<Url>,
/// Keep human-readable aliases in memory (and persist them to config), to hide ZId hex strings from the user. /// Keep human-readable aliases in memory (and persist them to config), to hide ZId hex strings from the user.
#[serde(default)] #[serde(default)]
// A `HashMap<String, HashMap<TenantId, TimelineId>>` would be more appropriate here, // A `HashMap<String, HashMap<TenantId, TimelineId>>` would be more appropriate here,
@@ -114,7 +109,7 @@ impl NeonBroker {
} }
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)] #[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
#[serde(default, deny_unknown_fields)] #[serde(default)]
pub struct PageServerConf { pub struct PageServerConf {
// node id // node id
pub id: NodeId, pub id: NodeId,
@@ -126,9 +121,6 @@ pub struct PageServerConf {
// auth type used for the PG and HTTP ports // auth type used for the PG and HTTP ports
pub pg_auth_type: AuthType, pub pg_auth_type: AuthType,
pub http_auth_type: AuthType, pub http_auth_type: AuthType,
pub(crate) virtual_file_io_engine: Option<String>,
pub(crate) get_vectored_impl: Option<String>,
} }
impl Default for PageServerConf { impl Default for PageServerConf {
@@ -139,8 +131,6 @@ impl Default for PageServerConf {
listen_http_addr: String::new(), listen_http_addr: String::new(),
pg_auth_type: AuthType::Trust, pg_auth_type: AuthType::Trust,
http_auth_type: AuthType::Trust, http_auth_type: AuthType::Trust,
virtual_file_io_engine: None,
get_vectored_impl: None,
} }
} }
} }
@@ -232,12 +222,12 @@ impl LocalEnv {
self.neon_distrib_dir.join("pageserver") self.neon_distrib_dir.join("pageserver")
} }
pub fn storage_controller_bin(&self) -> PathBuf { pub fn attachment_service_bin(&self) -> PathBuf {
// Irrespective of configuration, storage controller binary is always // Irrespective of configuration, attachment service binary is always
// run from the same location as neon_local. This means that for compatibility // run from the same location as neon_local. This means that for compatibility
// tests that run old pageserver/safekeeper, they still run latest storage controller. // tests that run old pageserver/safekeeper, they still run latest attachment service.
let neon_local_bin_dir = env::current_exe().unwrap().parent().unwrap().to_owned(); let neon_local_bin_dir = env::current_exe().unwrap().parent().unwrap().to_owned();
neon_local_bin_dir.join("storage_controller") neon_local_bin_dir.join("attachment_service")
} }
pub fn safekeeper_bin(&self) -> PathBuf { pub fn safekeeper_bin(&self) -> PathBuf {
@@ -417,17 +407,14 @@ impl LocalEnv {
// this function is used only for testing purposes in the CLI, e.g. to generate tokens during init // this function is used only for testing purposes in the CLI, e.g. to generate tokens during init
pub fn generate_auth_token(&self, claims: &Claims) -> anyhow::Result<String> { pub fn generate_auth_token(&self, claims: &Claims) -> anyhow::Result<String> {
let private_key_path = self.get_private_key_path(); let private_key_path = if self.private_key_path.is_absolute() {
let key_data = fs::read(private_key_path)?;
encode_from_key_file(claims, &key_data)
}
pub fn get_private_key_path(&self) -> PathBuf {
if self.private_key_path.is_absolute() {
self.private_key_path.to_path_buf() self.private_key_path.to_path_buf()
} else { } else {
self.base_data_dir.join(&self.private_key_path) self.base_data_dir.join(&self.private_key_path)
} };
let key_data = fs::read(private_key_path)?;
encode_from_key_file(claims, &key_data)
} }
// //

View File

@@ -30,6 +30,7 @@ use utils::{
lsn::Lsn, lsn::Lsn,
}; };
use crate::attachment_service::{AttachmentService, NodeRegisterRequest};
use crate::local_env::PageServerConf; use crate::local_env::PageServerConf;
use crate::{background_process, local_env::LocalEnv}; use crate::{background_process, local_env::LocalEnv};
@@ -78,39 +79,18 @@ impl PageServerNode {
/// ///
/// These all end up on the command line of the `pageserver` binary. /// These all end up on the command line of the `pageserver` binary.
fn neon_local_overrides(&self, cli_overrides: &[&str]) -> Vec<String> { fn neon_local_overrides(&self, cli_overrides: &[&str]) -> Vec<String> {
let id = format!("id={}", self.conf.id);
// FIXME: the paths should be shell-escaped to handle paths with spaces, quotes etc. // FIXME: the paths should be shell-escaped to handle paths with spaces, quotes etc.
let pg_distrib_dir_param = format!( let pg_distrib_dir_param = format!(
"pg_distrib_dir='{}'", "pg_distrib_dir='{}'",
self.env.pg_distrib_dir_raw().display() self.env.pg_distrib_dir_raw().display()
); );
let PageServerConf { let http_auth_type_param = format!("http_auth_type='{}'", self.conf.http_auth_type);
id, let listen_http_addr_param = format!("listen_http_addr='{}'", self.conf.listen_http_addr);
listen_pg_addr,
listen_http_addr,
pg_auth_type,
http_auth_type,
virtual_file_io_engine,
get_vectored_impl,
} = &self.conf;
let id = format!("id={}", id); let pg_auth_type_param = format!("pg_auth_type='{}'", self.conf.pg_auth_type);
let listen_pg_addr_param = format!("listen_pg_addr='{}'", self.conf.listen_pg_addr);
let http_auth_type_param = format!("http_auth_type='{}'", http_auth_type);
let listen_http_addr_param = format!("listen_http_addr='{}'", listen_http_addr);
let pg_auth_type_param = format!("pg_auth_type='{}'", pg_auth_type);
let listen_pg_addr_param = format!("listen_pg_addr='{}'", listen_pg_addr);
let virtual_file_io_engine = if let Some(virtual_file_io_engine) = virtual_file_io_engine {
format!("virtual_file_io_engine='{virtual_file_io_engine}'")
} else {
String::new()
};
let get_vectored_impl = if let Some(get_vectored_impl) = get_vectored_impl {
format!("get_vectored_impl='{get_vectored_impl}'")
} else {
String::new()
};
let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url()); let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url());
@@ -122,8 +102,6 @@ impl PageServerNode {
listen_http_addr_param, listen_http_addr_param,
listen_pg_addr_param, listen_pg_addr_param,
broker_endpoint_param, broker_endpoint_param,
virtual_file_io_engine,
get_vectored_impl,
]; ];
if let Some(control_plane_api) = &self.env.control_plane_api { if let Some(control_plane_api) = &self.env.control_plane_api {
@@ -132,12 +110,12 @@ impl PageServerNode {
control_plane_api.as_str() control_plane_api.as_str()
)); ));
// Storage controller uses the same auth as pageserver: if JWT is enabled // Attachment service uses the same auth as pageserver: if JWT is enabled
// for us, we will also need it to talk to them. // for us, we will also need it to talk to them.
if matches!(http_auth_type, AuthType::NeonJWT) { if matches!(self.conf.http_auth_type, AuthType::NeonJWT) {
let jwt_token = self let jwt_token = self
.env .env
.generate_auth_token(&Claims::new(None, Scope::GenerationsApi)) .generate_auth_token(&Claims::new(None, Scope::PageServerApi))
.unwrap(); .unwrap();
overrides.push(format!("control_plane_api_token='{}'", jwt_token)); overrides.push(format!("control_plane_api_token='{}'", jwt_token));
} }
@@ -152,7 +130,8 @@ impl PageServerNode {
)); ));
} }
if *http_auth_type != AuthType::Trust || *pg_auth_type != AuthType::Trust { if self.conf.http_auth_type != AuthType::Trust || self.conf.pg_auth_type != AuthType::Trust
{
// Keys are generated in the toplevel repo dir, pageservers' workdirs // Keys are generated in the toplevel repo dir, pageservers' workdirs
// are one level below that, so refer to keys with ../ // are one level below that, so refer to keys with ../
overrides.push("auth_validation_public_key_path='../auth_public_key.pem'".to_owned()); overrides.push("auth_validation_public_key_path='../auth_public_key.pem'".to_owned());
@@ -183,8 +162,8 @@ impl PageServerNode {
.expect("non-Unicode path") .expect("non-Unicode path")
} }
pub async fn start(&self, config_overrides: &[&str]) -> anyhow::Result<()> { pub async fn start(&self, config_overrides: &[&str], register: bool) -> anyhow::Result<()> {
self.start_node(config_overrides, false).await self.start_node(config_overrides, false, register).await
} }
fn pageserver_init(&self, config_overrides: &[&str]) -> anyhow::Result<()> { fn pageserver_init(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
@@ -222,28 +201,6 @@ impl PageServerNode {
String::from_utf8_lossy(&init_output.stderr), String::from_utf8_lossy(&init_output.stderr),
); );
// Write metadata file, used by pageserver on startup to register itself with
// the storage controller
let metadata_path = datadir.join("metadata.json");
let (_http_host, http_port) =
parse_host_port(&self.conf.listen_http_addr).expect("Unable to parse listen_http_addr");
let http_port = http_port.unwrap_or(9898);
// Intentionally hand-craft JSON: this acts as an implicit format compat test
// in case the pageserver-side structure is edited, and reflects the real life
// situation: the metadata is written by some other script.
std::fs::write(
metadata_path,
serde_json::to_vec(&serde_json::json!({
"host": "localhost",
"port": self.pg_connection_config.port(),
"http_host": "localhost",
"http_port": http_port,
}))
.unwrap(),
)
.expect("Failed to write metadata file");
Ok(()) Ok(())
} }
@@ -251,6 +208,7 @@ impl PageServerNode {
&self, &self,
config_overrides: &[&str], config_overrides: &[&str],
update_config: bool, update_config: bool,
register: bool,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
// TODO: using a thread here because start_process() is not async but we need to call check_status() // TODO: using a thread here because start_process() is not async but we need to call check_status()
let datadir = self.repo_path(); let datadir = self.repo_path();
@@ -290,6 +248,23 @@ impl PageServerNode {
) )
.await?; .await?;
if register {
let attachment_service = AttachmentService::from_env(&self.env);
let (pg_host, pg_port) =
parse_host_port(&self.conf.listen_pg_addr).expect("Unable to parse listen_pg_addr");
let (http_host, http_port) = parse_host_port(&self.conf.listen_http_addr)
.expect("Unable to parse listen_http_addr");
attachment_service
.node_register(NodeRegisterRequest {
node_id: self.conf.id,
listen_pg_addr: pg_host.to_string(),
listen_pg_port: pg_port.unwrap_or(5432),
listen_http_addr: http_host.to_string(),
listen_http_port: http_port.unwrap_or(80),
})
.await?;
}
Ok(()) Ok(())
} }
@@ -375,11 +350,6 @@ impl PageServerNode {
.remove("compaction_threshold") .remove("compaction_threshold")
.map(|x| x.parse::<usize>()) .map(|x| x.parse::<usize>())
.transpose()?, .transpose()?,
compaction_algorithm: settings
.remove("compaction_algorithm")
.map(serde_json::from_str)
.transpose()
.context("Failed to parse 'compaction_algorithm' json")?,
gc_horizon: settings gc_horizon: settings
.remove("gc_horizon") .remove("gc_horizon")
.map(|x| x.parse::<u64>()) .map(|x| x.parse::<u64>())
@@ -419,17 +389,12 @@ impl PageServerNode {
evictions_low_residence_duration_metric_threshold: settings evictions_low_residence_duration_metric_threshold: settings
.remove("evictions_low_residence_duration_metric_threshold") .remove("evictions_low_residence_duration_metric_threshold")
.map(|x| x.to_string()), .map(|x| x.to_string()),
heatmap_period: settings.remove("heatmap_period").map(|x| x.to_string()), gc_feedback: settings
lazy_slru_download: settings .remove("gc_feedback")
.remove("lazy_slru_download")
.map(|x| x.parse::<bool>()) .map(|x| x.parse::<bool>())
.transpose() .transpose()
.context("Failed to parse 'lazy_slru_download' as bool")?, .context("Failed to parse 'gc_feedback' as bool")?,
timeline_get_throttle: settings heatmap_period: settings.remove("heatmap_period").map(|x| x.to_string()),
.remove("timeline_get_throttle")
.map(serde_json::from_str)
.transpose()
.context("parse `timeline_get_throttle` from json")?,
}; };
if !settings.is_empty() { if !settings.is_empty() {
bail!("Unrecognized tenant settings: {settings:?}") bail!("Unrecognized tenant settings: {settings:?}")
@@ -451,8 +416,6 @@ impl PageServerNode {
generation, generation,
config, config,
shard_parameters: ShardParameters::default(), shard_parameters: ShardParameters::default(),
// Placement policy is not meaningful for creations not done via storage controller
placement_policy: None,
}; };
if !settings.is_empty() { if !settings.is_empty() {
bail!("Unrecognized tenant settings: {settings:?}") bail!("Unrecognized tenant settings: {settings:?}")
@@ -485,11 +448,6 @@ impl PageServerNode {
.map(|x| x.parse::<usize>()) .map(|x| x.parse::<usize>())
.transpose() .transpose()
.context("Failed to parse 'compaction_threshold' as an integer")?, .context("Failed to parse 'compaction_threshold' as an integer")?,
compaction_algorithm: settings
.remove("compactin_algorithm")
.map(serde_json::from_str)
.transpose()
.context("Failed to parse 'compaction_algorithm' json")?,
gc_horizon: settings gc_horizon: settings
.remove("gc_horizon") .remove("gc_horizon")
.map(|x| x.parse::<u64>()) .map(|x| x.parse::<u64>())
@@ -531,17 +489,12 @@ impl PageServerNode {
evictions_low_residence_duration_metric_threshold: settings evictions_low_residence_duration_metric_threshold: settings
.remove("evictions_low_residence_duration_metric_threshold") .remove("evictions_low_residence_duration_metric_threshold")
.map(|x| x.to_string()), .map(|x| x.to_string()),
heatmap_period: settings.remove("heatmap_period").map(|x| x.to_string()), gc_feedback: settings
lazy_slru_download: settings .remove("gc_feedback")
.remove("lazy_slru_download")
.map(|x| x.parse::<bool>()) .map(|x| x.parse::<bool>())
.transpose() .transpose()
.context("Failed to parse 'lazy_slru_download' as bool")?, .context("Failed to parse 'gc_feedback' as bool")?,
timeline_get_throttle: settings heatmap_period: settings.remove("heatmap_period").map(|x| x.to_string()),
.remove("timeline_get_throttle")
.map(serde_json::from_str)
.transpose()
.context("parse `timeline_get_throttle` from json")?,
} }
}; };
@@ -561,11 +514,10 @@ impl PageServerNode {
tenant_shard_id: TenantShardId, tenant_shard_id: TenantShardId,
config: LocationConfig, config: LocationConfig,
flush_ms: Option<Duration>, flush_ms: Option<Duration>,
lazy: bool,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
Ok(self Ok(self
.http_client .http_client
.location_config(tenant_shard_id, config, flush_ms, lazy) .location_config(tenant_shard_id, config, flush_ms)
.await?) .await?)
} }
@@ -576,6 +528,13 @@ impl PageServerNode {
Ok(self.http_client.list_timelines(*tenant_shard_id).await?) Ok(self.http_client.list_timelines(*tenant_shard_id).await?)
} }
pub async fn tenant_secondary_download(&self, tenant_id: &TenantShardId) -> anyhow::Result<()> {
Ok(self
.http_client
.tenant_secondary_download(*tenant_id)
.await?)
}
pub async fn timeline_create( pub async fn timeline_create(
&self, &self,
tenant_shard_id: TenantShardId, tenant_shard_id: TenantShardId,
@@ -623,7 +582,7 @@ impl PageServerNode {
eprintln!("connection error: {}", e); eprintln!("connection error: {}", e);
} }
}); });
let client = std::pin::pin!(client); tokio::pin!(client);
// Init base reader // Init base reader
let (start_lsn, base_tarfile_path) = base; let (start_lsn, base_tarfile_path) = base;

View File

@@ -70,9 +70,6 @@ Should only be used e.g. for status check/tenant creation/list.
Should only be used e.g. for status check. Should only be used e.g. for status check.
Currently also used for connection from any pageserver to any safekeeper. Currently also used for connection from any pageserver to any safekeeper.
"generations_api": Provides access to the upcall APIs served by the storage controller or the control plane.
"admin": Provides access to the control plane and admin APIs of the storage controller.
### CLI ### CLI
CLI generates a key pair during call to `neon_local init` with the following commands: CLI generates a key pair during call to `neon_local init` with the following commands:

View File

@@ -21,7 +21,7 @@ We build all images after a successful `release` tests run and push automaticall
## Docker Compose example ## Docker Compose example
You can see a [docker compose](https://docs.docker.com/compose/) example to create a neon cluster in [/docker-compose/docker-compose.yml](/docker-compose/docker-compose.yml). It creates the following containers. You can see a [docker compose](https://docs.docker.com/compose/) example to create a neon cluster in [/docker-compose/docker-compose.yml](/docker-compose/docker-compose.yml). It creates the following conatainers.
- pageserver x 1 - pageserver x 1
- safekeeper x 3 - safekeeper x 3
@@ -38,7 +38,7 @@ You can specify version of neon cluster using following environment values.
- TAG: the tag version of [docker image](https://registry.hub.docker.com/r/neondatabase/neon/tags) (default is latest), which is tagged in [CI test](/.github/workflows/build_and_test.yml) - TAG: the tag version of [docker image](https://registry.hub.docker.com/r/neondatabase/neon/tags) (default is latest), which is tagged in [CI test](/.github/workflows/build_and_test.yml)
``` ```
$ cd docker-compose/ $ cd docker-compose/
$ docker-compose down # remove the containers if exists $ docker-compose down # remove the conainers if exists
$ PG_VERSION=15 TAG=2937 docker-compose up --build -d # You can specify the postgres and image version $ PG_VERSION=15 TAG=2937 docker-compose up --build -d # You can specify the postgres and image version
Creating network "dockercompose_default" with the default driver Creating network "dockercompose_default" with the default driver
Creating docker-compose_storage_broker_1 ... done Creating docker-compose_storage_broker_1 ... done

View File

@@ -64,7 +64,7 @@ Storage.
The LayerMap tracks what layers exist in a timeline. The LayerMap tracks what layers exist in a timeline.
Currently, the layer map is just a resizable array (Vec). On a GetPage@LSN or Currently, the layer map is just a resizeable array (Vec). On a GetPage@LSN or
other read request, the layer map scans through the array to find the right layer other read request, the layer map scans through the array to find the right layer
that contains the data for the requested page. The read-code in LayeredTimeline that contains the data for the requested page. The read-code in LayeredTimeline
is aware of the ancestor, and returns data from the ancestor timeline if it's is aware of the ancestor, and returns data from the ancestor timeline if it's
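
As a rough illustration of that lookup (a sketch with made-up types, not the pageserver's actual `LayerMap` API), the array is walked linearly and the first layer whose key and LSN ranges cover the request is returned:

```rust
use std::ops::Range;

// Illustrative stand-in for a layer's coverage; not the real pageserver types.
struct LayerDesc {
    key_range: Range<u64>,
    lsn_range: Range<u64>,
}

// Linear scan over the Vec, as described above: return the first layer that
// contains the requested key at the requested LSN.
fn find_layer(layers: &[LayerDesc], key: u64, lsn: u64) -> Option<&LayerDesc> {
    layers
        .iter()
        .find(|l| l.key_range.contains(&key) && l.lsn_range.contains(&lsn))
}
```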

View File

@@ -22,7 +22,7 @@ timeline to shutdown. It will also wait for them to finish.
A task registered in the task registry can check if it has been A task registered in the task registry can check if it has been
requested to shut down, by calling `is_shutdown_requested()`. There's requested to shut down, by calling `is_shutdown_requested()`. There's
also a `shutdown_watcher()` Future that can be used with `tokio::select!` also a `shudown_watcher()` Future that can be used with `tokio::select!`
or similar, to wake up on shutdown. or similar, to wake up on shutdown.
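
A minimal sketch of that select-on-shutdown pattern, using a `tokio::sync::watch` channel to stand in for the registry's `shutdown_watcher()` and `is_shutdown_requested()` helpers:

```rust
use std::time::Duration;
use tokio::sync::watch;

// Sketch only: a background loop that does periodic work until shutdown is
// signalled. Real tasks use the registry's shutdown_watcher() /
// is_shutdown_requested() helpers instead of a raw watch channel.
async fn background_loop(mut shutdown_rx: watch::Receiver<bool>) {
    loop {
        tokio::select! {
            // Wakes up as soon as the sender signals shutdown.
            _ = shutdown_rx.changed() => break,
            _ = tokio::time::sleep(Duration::from_secs(1)) => {
                // ... one unit of periodic work would go here ...
            }
        }
    }
}
```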

View File

@@ -74,4 +74,4 @@ somewhat wasteful, but because most WAL records only affect one page,
the overhead is acceptable. the overhead is acceptable.
The WAL redo always happens for one particular page. If the WAL record The WAL redo always happens for one particular page. If the WAL record
contains changes to other pages, they are ignored. coantains changes to other pages, they are ignored.

View File

@@ -1,4 +1,4 @@
# Neon storage node — alternative # Zenith storage node — alternative
## **Design considerations** ## **Design considerations**

View File

@@ -1,6 +1,6 @@
# Command line interface (end-user) # Command line interface (end-user)
Neon CLI as it is described here mostly resides on the same conceptual level as pg_ctl/initdb/pg_recvxlog/etc and replaces some of them in an opinionated way. I would also suggest bundling our patched postgres inside neon distribution at least at the start. Zenith CLI as it is described here mostly resides on the same conceptual level as pg_ctl/initdb/pg_recvxlog/etc and replaces some of them in an opinionated way. I would also suggest bundling our patched postgres inside zenith distribution at least at the start.
This proposal is focused on managing local installations. For cluster operations, different tooling would be needed. The point of integration between the two is storage URL: no matter how complex cluster setup is it may provide an endpoint where the user may push snapshots. This proposal is focused on managing local installations. For cluster operations, different tooling would be needed. The point of integration between the two is storage URL: no matter how complex cluster setup is it may provide an endpoint where the user may push snapshots.
@@ -8,40 +8,40 @@ The most important concept here is a snapshot, which can be created/pushed/pulle
# Possible usage scenarios # Possible usage scenarios
## Install neon, run a postgres ## Install zenith, run a postgres
``` ```
> brew install pg-neon > brew install pg-zenith
> neon pg create # creates pgdata with default pattern pgdata$i > zenith pg create # creates pgdata with default pattern pgdata$i
> neon pg list > zenith pg list
ID PGDATA USED STORAGE ENDPOINT ID PGDATA USED STORAGE ENDPOINT
primary1 pgdata1 0G neon-local localhost:5432 primary1 pgdata1 0G zenith-local localhost:5432
``` ```
## Import standalone postgres to neon ## Import standalone postgres to zenith
``` ```
> neon snapshot import --from=basebackup://replication@localhost:5432/ oldpg > zenith snapshot import --from=basebackup://replication@localhost:5432/ oldpg
[====================------------] 60% | 20MB/s [====================------------] 60% | 20MB/s
> neon snapshot list > zenith snapshot list
ID SIZE PARENT ID SIZE PARENT
oldpg 5G - oldpg 5G -
> neon pg create --snapshot oldpg > zenith pg create --snapshot oldpg
Started postgres on localhost:5432 Started postgres on localhost:5432
> neon pg list > zenith pg list
ID PGDATA USED STORAGE ENDPOINT ID PGDATA USED STORAGE ENDPOINT
primary1 pgdata1 5G neon-local localhost:5432 primary1 pgdata1 5G zenith-local localhost:5432
> neon snapshot destroy oldpg > zenith snapshot destroy oldpg
Ok Ok
``` ```
Also, we may start snapshot import implicitly by looking at snapshot schema Also, we may start snapshot import implicitly by looking at snapshot schema
``` ```
> neon pg create --snapshot basebackup://replication@localhost:5432/ > zenith pg create --snapshot basebackup://replication@localhost:5432/
Downloading snapshot... Done. Downloading snapshot... Done.
Started postgres on localhost:5432 Started postgres on localhost:5432
Destroying snapshot... Done. Destroying snapshot... Done.
@@ -52,39 +52,39 @@ Destroying snapshot... Done.
Since we may export the whole snapshot as one big file (tar of basebackup, maybe with some manifest) it may be shared over conventional means: http, ssh, [git+lfs](https://docs.github.com/en/github/managing-large-files/about-git-large-file-storage). Since we may export the whole snapshot as one big file (tar of basebackup, maybe with some manifest) it may be shared over conventional means: http, ssh, [git+lfs](https://docs.github.com/en/github/managing-large-files/about-git-large-file-storage).
``` ```
> neon pg create --snapshot http://learn-postgres.com/movies_db.neon movies > zenith pg create --snapshot http://learn-postgres.com/movies_db.zenith movies
``` ```
## Create snapshot and push it to the cloud ## Create snapshot and push it to the cloud
``` ```
> neon snapshot create pgdata1@snap1 > zenith snapshot create pgdata1@snap1
> neon snapshot push --to ssh://stas@neon.tech pgdata1@snap1 > zenith snapshot push --to ssh://stas@zenith.tech pgdata1@snap1
``` ```
## Rollback database to the snapshot ## Rollback database to the snapshot
One way to rollback the database is just to init a new database from the snapshot and destroy the old one. But creating a new database from a snapshot would require a copy of that snapshot which is time consuming operation. Another option that would be cool to support is the ability to create the copy-on-write database from the snapshot without copying data, and store updated pages in a separate location, however that way would have performance implications. So to properly rollback the database to the older state we have `neon pg checkout`. One way to rollback the database is just to init a new database from the snapshot and destroy the old one. But creating a new database from a snapshot would require a copy of that snapshot which is time consuming operation. Another option that would be cool to support is the ability to create the copy-on-write database from the snapshot without copying data, and store updated pages in a separate location, however that way would have performance implications. So to properly rollback the database to the older state we have `zenith pg checkout`.
``` ```
> neon pg list > zenith pg list
ID PGDATA USED STORAGE ENDPOINT ID PGDATA USED STORAGE ENDPOINT
primary1 pgdata1 5G neon-local localhost:5432 primary1 pgdata1 5G zenith-local localhost:5432
> neon snapshot create pgdata1@snap1 > zenith snapshot create pgdata1@snap1
> neon snapshot list > zenith snapshot list
ID SIZE PARENT ID SIZE PARENT
oldpg 5G - oldpg 5G -
pgdata1@snap1 6G - pgdata1@snap1 6G -
pgdata1@CURRENT 6G - pgdata1@CURRENT 6G -
> neon pg checkout pgdata1@snap1 > zenith pg checkout pgdata1@snap1
Stopping postgres on pgdata1. Stopping postgres on pgdata1.
Rolling back pgdata1@CURRENT to pgdata1@snap1. Rolling back pgdata1@CURRENT to pgdata1@snap1.
Starting postgres on pgdata1. Starting postgres on pgdata1.
> neon snapshot list > zenith snapshot list
ID SIZE PARENT ID SIZE PARENT
oldpg 5G - oldpg 5G -
pgdata1@snap1 6G - pgdata1@snap1 6G -
@@ -99,7 +99,7 @@ Some notes: pgdata1@CURRENT -- implicit snapshot representing the current state
PITR area acts like a continuous snapshot where you can reset the database to any point in time within this area (by area I mean some TTL period or some size limit, both possibly infinite). PITR area acts like a continuous snapshot where you can reset the database to any point in time within this area (by area I mean some TTL period or some size limit, both possibly infinite).
``` ```
> neon pitr create --storage s3tank --ttl 30d --name pitr_last_month > zenith pitr create --storage s3tank --ttl 30d --name pitr_last_month
``` ```
Resetting the database to some state in the past would require creating a snapshot on some lsn / time in this pitr area.
@@ -108,29 +108,29 @@ Resetting the database to some state in past would require creating a snapshot o
## storage ## storage
Storage is either neon pagestore or s3. Users may create a database in a pagestore and create/move *snapshots* and *pitr regions* in both pagestore and s3. Storage is a concept similar to `git remote`. After installation, I imagine one local storage is available by default. Storage is either zenith pagestore or s3. Users may create a database in a pagestore and create/move *snapshots* and *pitr regions* in both pagestore and s3. Storage is a concept similar to `git remote`. After installation, I imagine one local storage is available by default.
**neon storage attach** -t [native|s3] -c key=value -n name **zenith storage attach** -t [native|s3] -c key=value -n name
Attaches/initializes storage. For --type=s3, user credentials and path should be provided. For --type=native we may support --path=/local/path and --url=neon.tech/stas/mystore. Other possible term for native is 'zstore'. Attaches/initializes storage. For --type=s3, user credentials and path should be provided. For --type=native we may support --path=/local/path and --url=zenith.tech/stas/mystore. Other possible term for native is 'zstore'.
**neon storage list** **zenith storage list**
Show currently attached storages. For example: Show currently attached storages. For example:
``` ```
> neon storage list > zenith storage list
NAME USED TYPE OPTIONS PATH NAME USED TYPE OPTIONS PATH
local 5.1G neon-local /opt/neon/store/local local 5.1G zenith-local /opt/zenith/store/local
local.compr 20.4G neon-local compression=on /opt/neon/store/local.compr local.compr 20.4G zenith-local compression=on /opt/zenith/store/local.compr
zcloud 60G neon-remote neon.tech/stas/mystore zcloud 60G zenith-remote zenith.tech/stas/mystore
s3tank 80G S3 s3tank 80G S3
``` ```
**neon storage detach** **zenith storage detach**
**neon storage show** **zenith storage show**
@@ -140,29 +140,29 @@ Manages postgres data directories and can start postgres instances with proper c
Pg is a term for a single postgres running on some data. I'm trying to avoid separation of datadir management and postgres instance management -- both of those concepts are bundled here together. Pg is a term for a single postgres running on some data. I'm trying to avoid separation of datadir management and postgres instance management -- both of those concepts are bundled here together.
**neon pg create** [--no-start --snapshot --cow] -s storage-name -n pgdata **zenith pg create** [--no-start --snapshot --cow] -s storage-name -n pgdata
Creates (initializes) new data directory in given storage and starts postgres. I imagine that storage for this operation may be only local and data movement to remote location happens through snapshots/pitr. Creates (initializes) new data directory in given storage and starts postgres. I imagine that storage for this operation may be only local and data movement to remote location happens through snapshots/pitr.
--no-start: just init datadir without creating --no-start: just init datadir without creating
--snapshot snap: init from the snapshot. Snap is a name or URL (neon.tech/stas/mystore/snap1) --snapshot snap: init from the snapshot. Snap is a name or URL (zenith.tech/stas/mystore/snap1)
--cow: initialize Copy-on-Write data directory on top of some snapshot (makes sense if it is a snapshot of currently running a database) --cow: initialize Copy-on-Write data directory on top of some snapshot (makes sense if it is a snapshot of currently running a database)
**neon pg destroy** **zenith pg destroy**
**neon pg start** [--replica] pgdata **zenith pg start** [--replica] pgdata
Start postgres with proper extensions preloaded/installed. Start postgres with proper extensions preloaded/installed.
**neon pg checkout** **zenith pg checkout**
Rollback data directory to some previous snapshot. Rollback data directory to some previous snapshot.
**neon pg stop** pg_id **zenith pg stop** pg_id
**neon pg list** **zenith pg list**
``` ```
ROLE PGDATA USED STORAGE ENDPOINT ROLE PGDATA USED STORAGE ENDPOINT
@@ -173,7 +173,7 @@ primary my_pg2 3.2G local.compr localhost:5435
- my_pg3 9.2G local.compr - - my_pg3 9.2G local.compr -
``` ```
**neon pg show** **zenith pg show**
``` ```
my_pg: my_pg:
@@ -194,7 +194,7 @@ my_pg:
``` ```
**neon pg start-rest/graphql** pgdata **zenith pg start-rest/graphql** pgdata
Starts REST/GraphQL proxy on top of postgres master. Not sure we should do that, just an idea. Starts REST/GraphQL proxy on top of postgres master. Not sure we should do that, just an idea.
@@ -203,35 +203,35 @@ Starts REST/GraphQL proxy on top of postgres master. Not sure we should do that,
Snapshot creation is cheap -- no actual data is copied, we just start retaining old pages. Snapshot size means the amount of retained data, not all data. Snapshot name looks like pgdata_name@tag_name. tag_name is set by the user during snapshot creation. There are some reserved tag names: CURRENT represents the current state of the data directory; HEAD{i} represents the data directory state that resided in the database before i-th checkout. Snapshot creation is cheap -- no actual data is copied, we just start retaining old pages. Snapshot size means the amount of retained data, not all data. Snapshot name looks like pgdata_name@tag_name. tag_name is set by the user during snapshot creation. There are some reserved tag names: CURRENT represents the current state of the data directory; HEAD{i} represents the data directory state that resided in the database before i-th checkout.
**neon snapshot create** pgdata_name@snap_name **zenith snapshot create** pgdata_name@snap_name
Creates a new snapshot in the same storage where pgdata_name exists. Creates a new snapshot in the same storage where pgdata_name exists.
**neon snapshot push** --to url pgdata_name@snap_name **zenith snapshot push** --to url pgdata_name@snap_name
Produces binary stream of a given snapshot. Under the hood starts temp read-only postgres over this snapshot and sends basebackup stream. Receiving side should start `neon snapshot recv` before push happens. If url has some special schema like neon:// receiving side may require auth start `neon snapshot recv` on the go. Produces binary stream of a given snapshot. Under the hood starts temp read-only postgres over this snapshot and sends basebackup stream. Receiving side should start `zenith snapshot recv` before push happens. If url has some special schema like zenith:// receiving side may require auth start `zenith snapshot recv` on the go.
**neon snapshot recv** **zenith snapshot recv**
Starts a port listening for a basebackup stream, prints connection info to stdout (so that user may use that in push command), and expects data on that socket. Starts a port listening for a basebackup stream, prints connection info to stdout (so that user may use that in push command), and expects data on that socket.
**neon snapshot pull** --from url or path **zenith snapshot pull** --from url or path
Connects to a remote neon/s3/file and pulls snapshot. The remote site should be neon service or files in our format. Connects to a remote zenith/s3/file and pulls snapshot. The remote site should be zenith service or files in our format.
**neon snapshot import** --from basebackup://<...> or path **zenith snapshot import** --from basebackup://<...> or path
Creates a new snapshot out of running postgres via basebackup protocol or basebackup files. Creates a new snapshot out of running postgres via basebackup protocol or basebackup files.
**neon snapshot export** **zenith snapshot export**
Starts read-only postgres over this snapshot and exports data in some format (pg_dump, or COPY TO on some/all tables). One of the options may be neon own format which is handy for us (but I think just tar of basebackup would be okay). Starts read-only postgres over this snapshot and exports data in some format (pg_dump, or COPY TO on some/all tables). One of the options may be zenith own format which is handy for us (but I think just tar of basebackup would be okay).
**neon snapshot diff** snap1 snap2 **zenith snapshot diff** snap1 snap2
Shows size of data changed between two snapshots. We also may provide options to diff schema/data in tables. To do that start temp read-only postgreses. Shows size of data changed between two snapshots. We also may provide options to diff schema/data in tables. To do that start temp read-only postgreses.
**neon snapshot destroy** **zenith snapshot destroy**
## pitr ## pitr
@@ -239,7 +239,7 @@ Pitr represents wal stream and ttl policy for that stream
XXX: any suggestions on a better name? XXX: any suggestions on a better name?
**neon pitr create** name **zenith pitr create** name
--ttl = inf | period --ttl = inf | period
@@ -247,21 +247,21 @@ XXX: any suggestions on a better name?
--storage = storage_name --storage = storage_name
**neon pitr extract-snapshot** pitr_name --lsn xxx **zenith pitr extract-snapshot** pitr_name --lsn xxx
Creates a snapshot out of some lsn in PITR area. The obtained snapshot may be managed with snapshot routines (move/send/export) Creates a snapshot out of some lsn in PITR area. The obtained snapshot may be managed with snapshot routines (move/send/export)
**neon pitr gc** pitr_name **zenith pitr gc** pitr_name
Force garbage collection on some PITR area. Force garbage collection on some PITR area.
**neon pitr list** **zenith pitr list**
**neon pitr destroy** **zenith pitr destroy**
## console ## console
**neon console** **zenith console**
Opens browser targeted at web console with the more or less same functionality as described here. Opens browser targeted at web console with the more or less same functionality as described here.

View File

@@ -6,7 +6,7 @@ When do we consider the WAL record as durable, so that we can
acknowledge the commit to the client and be reasonably certain that we acknowledge the commit to the client and be reasonably certain that we
will not lose the transaction? will not lose the transaction?
Neon uses a group of WAL safekeeper nodes to hold the generated WAL. Zenith uses a group of WAL safekeeper nodes to hold the generated WAL.
A WAL record is considered durable, when it has been written to a A WAL record is considered durable, when it has been written to a
majority of WAL safekeeper nodes. In this document, I use 5 majority of WAL safekeeper nodes. In this document, I use 5
safekeepers, because I have five fingers. A WAL record is durable, safekeepers, because I have five fingers. A WAL record is durable,

View File

@@ -1,23 +1,23 @@
# Neon local # Zenith local
Here I list some objectives to keep in mind when discussing neon-local design and a proposal that brings all components together. Your comments on both parts are very welcome. Here I list some objectives to keep in mind when discussing zenith-local design and a proposal that brings all components together. Your comments on both parts are very welcome.
#### Why do we need it? #### Why do we need it?
- For distribution - this easy to use binary will help us to build adoption among developers. - For distribution - this easy to use binary will help us to build adoption among developers.
- For internal use - to test all components together. - For internal use - to test all components together.
In my understanding, we consider it to be just a mock-up version of neon-cloud. In my understanding, we consider it to be just a mock-up version of zenith-cloud.
> Question: How much should we care about durability and security issues for a local setup? > Question: How much should we care about durability and security issues for a local setup?
#### Why is it better than a simple local postgres? #### Why is it better than a simple local postgres?
- Easy one-line setup. As simple as `cargo install neon && neon start` - Easy one-line setup. As simple as `cargo install zenith && zenith start`
- Quick and cheap creation of compute nodes over the same storage. - Quick and cheap creation of compute nodes over the same storage.
> Question: How can we describe a use-case for this feature? > Question: How can we describe a use-case for this feature?
- Neon-local can work with S3 directly. - Zenith-local can work with S3 directly.
- Push and pull images (snapshots) to remote S3 to exchange data with other users. - Push and pull images (snapshots) to remote S3 to exchange data with other users.
@@ -31,50 +31,50 @@ Ideally, just one binary that incorporates all elements we need.
#### Components: #### Components:
- **neon-CLI** - interface for end-users. Turns commands to REST requests and handles responses to show them in a user-friendly way. - **zenith-CLI** - interface for end-users. Turns commands to REST requests and handles responses to show them in a user-friendly way.
CLI proposal is here https://github.com/neondatabase/rfcs/blob/003-laptop-cli.md/003-laptop-cli.md CLI proposal is here https://github.com/libzenith/rfcs/blob/003-laptop-cli.md/003-laptop-cli.md
WIP code is here: https://github.com/neondatabase/postgres/tree/main/pageserver/src/bin/cli WIP code is here: https://github.com/libzenith/postgres/tree/main/pageserver/src/bin/cli
- **neon-console** - WEB UI with same functionality as CLI. - **zenith-console** - WEB UI with same functionality as CLI.
>Note: not for the first release. >Note: not for the first release.
- **neon-local** - entrypoint. Service that starts all other components and handles REST API requests. See REST API proposal below. - **zenith-local** - entrypoint. Service that starts all other components and handles REST API requests. See REST API proposal below.
> Idea: spawn all other components as child processes, so that we could shutdown everything by stopping neon-local. > Idea: spawn all other components as child processes, so that we could shutdown everything by stopping zenith-local.
- **neon-pageserver** - consists of a storage and WAL-replaying service (modified PG in current implementation). - **zenith-pageserver** - consists of a storage and WAL-replaying service (modified PG in current implementation).
> Question: Probably, for local setup we should be able to bypass page-storage and interact directly with S3 to avoid double caching in shared buffers and page-server? > Question: Probably, for local setup we should be able to bypass page-storage and interact directly with S3 to avoid double caching in shared buffers and page-server?
WIP code is here: https://github.com/neondatabase/postgres/tree/main/pageserver/src WIP code is here: https://github.com/libzenith/postgres/tree/main/pageserver/src
- **neon-S3** - stores base images of the database and WAL in S3 object storage. Import and export images from/to neon. - **zenith-S3** - stores base images of the database and WAL in S3 object storage. Import and export images from/to zenith.
> Question: How should it operate in a local setup? Will we manage it ourselves or ask user to provide credentials for existing S3 object storage (i.e. minio)? > Question: How should it operate in a local setup? Will we manage it ourselves or ask user to provide credentials for existing S3 object storage (i.e. minio)?
> Question: Do we use it together with local page store or they are interchangeable? > Question: Do we use it together with local page store or they are interchangeable?
WIP code is ??? WIP code is ???
- **neon-safekeeper** - receives WAL from postgres, stores it durably, answers to Postgres that the "sync" succeeded. - **zenith-safekeeper** - receives WAL from postgres, stores it durably, answers to Postgres that the "sync" succeeded.
> Question: How should it operate in a local setup? In my understanding it should push WAL directly to S3 (if we use it) or store all data locally (if we use local page storage). The latter option seems meaningless (extra overhead and no gain), but it is still good to test the system. > Question: How should it operate in a local setup? In my understanding it should push WAL directly to S3 (if we use it) or store all data locally (if we use local page storage). The latter option seems meaningless (extra overhead and no gain), but it is still good to test the system.
WIP code is here: https://github.com/neondatabase/postgres/tree/main/src/bin/safekeeper WIP code is here: https://github.com/libzenith/postgres/tree/main/src/bin/safekeeper
- **neon-computenode** - bottomless PostgreSQL, ideally upstream, but for a start - our modified version. User can quickly create and destroy them and work with it as a regular postgres database. - **zenith-computenode** - bottomless PostgreSQL, ideally upstream, but for a start - our modified version. User can quickly create and destroy them and work with it as a regular postgres database.
WIP code is in main branch and here: https://github.com/neondatabase/postgres/commits/compute_node WIP code is in main branch and here: https://github.com/libzenith/postgres/commits/compute_node
#### REST API: #### REST API:
Service endpoint: `http://localhost:3000` Service endpoint: `http://localhost:3000`
Resources: Resources:
- /storages - Where data lives: neon-pageserver or neon-s3 - /storages - Where data lives: zenith-pageserver or zenith-s3
- /pgs - Postgres - neon-computenode - /pgs - Postgres - zenith-computenode
- /snapshots - snapshots **TODO** - /snapshots - snapshots **TODO**
>Question: Do we want to extend this API to manage neon components? I.e. start page-server, manage safekeepers and so on? Or they will be hardcoded to just start once and for all? >Question: Do we want to extend this API to manage zenith components? I.e. start page-server, manage safekeepers and so on? Or they will be hardcoded to just start once and for all?
Methods and their mapping to CLI: Methods and their mapping to CLI:
- /storages - neon-pageserver or neon-s3 - /storages - zenith-pageserver or zenith-s3
CLI | REST API CLI | REST API
------------- | ------------- ------------- | -------------
@@ -84,7 +84,7 @@ storage list | GET /storages
storage show -n name | GET /storages/:storage_name storage show -n name | GET /storages/:storage_name
- /pgs - neon-computenode - /pgs - zenith-computenode
CLI | REST API CLI | REST API
------------- | ------------- ------------- | -------------

View File

@@ -1,45 +1,45 @@
Neon CLI allows you to operate database clusters (catalog clusters) and their commit history locally and in the cloud. Since ANSI calls them catalog clusters and cluster is a loaded term in the modern infrastructure we will call it "catalog". Zenith CLI allows you to operate database clusters (catalog clusters) and their commit history locally and in the cloud. Since ANSI calls them catalog clusters and cluster is a loaded term in the modern infrastructure we will call it "catalog".
# CLI v2 (after chatting with Carl) # CLI v2 (after chatting with Carl)
Neon introduces the notion of a repository. Zenith introduces the notion of a repository.
```bash ```bash
neon init zenith init
neon clone neon://neon.tech/piedpiper/northwind -- clones a repo to the northwind directory zenith clone zenith://zenith.tech/piedpiper/northwind -- clones a repo to the northwind directory
``` ```
Once you have a cluster catalog you can explore it Once you have a cluster catalog you can explore it
```bash ```bash
neon log -- returns a list of commits zenith log -- returns a list of commits
neon status -- returns if there are changes in the catalog that can be committed zenith status -- returns if there are changes in the catalog that can be committed
neon commit -- commits the changes and generates a new commit hash zenith commit -- commits the changes and generates a new commit hash
neon branch experimental <hash> -- creates a branch called testdb based on a given commit hash zenith branch experimental <hash> -- creates a branch called testdb based on a given commit hash
``` ```
To make changes in the catalog you need to run compute nodes To make changes in the catalog you need to run compute nodes
```bash ```bash
-- here is how you start a compute node -- here is how you start a compute node
neon start /home/pipedpiper/northwind:main -- starts a compute instance zenith start /home/pipedpiper/northwind:main -- starts a compute instance
neon start neon://neon.tech/northwind:main -- starts a compute instance in the cloud zenith start zenith://zenith.tech/northwind:main -- starts a compute instance in the cloud
-- you can start a compute node against any hash or branch -- you can start a compute node against any hash or branch
neon start /home/pipedpiper/northwind:experimental --port 8008 -- start another compute instance (on different port) zenith start /home/pipedpiper/northwind:experimental --port 8008 -- start another compute instance (on different port)
-- you can start a compute node against any hash or branch -- you can start a compute node against any hash or branch
neon start /home/pipedpiper/northwind:<hash> --port 8009 -- start another compute instance (on different port) zenith start /home/pipedpiper/northwind:<hash> --port 8009 -- start another compute instance (on different port)
-- After running some DML you can run -- After running some DML you can run
-- neon status and see how there are two WAL streams one on top of -- zenith status and see how there are two WAL streams one on top of
-- the main branch -- the main branch
neon status zenith status
-- and another on top of the experimental branch -- and another on top of the experimental branch
neon status -b experimental zenith status -b experimental
-- you can commit each branch separately -- you can commit each branch separately
neon commit main zenith commit main
-- or -- or
neon commit -c /home/pipedpiper/northwind:experimental zenith commit -c /home/pipedpiper/northwind:experimental
``` ```
Starting compute instances against cloud environments Starting compute instances against cloud environments
@@ -47,18 +47,18 @@ Starting compute instances against cloud environments
```bash ```bash
-- you can start a compute instance against the cloud environment -- you can start a compute instance against the cloud environment
-- in this case all of the changes will be streamed into the cloud -- in this case all of the changes will be streamed into the cloud
neon start https://neon:tech/pipedpiper/northwind:main zenith start https://zenith:tech/pipedpiper/northwind:main
neon start https://neon:tech/pipedpiper/northwind:main zenith start https://zenith:tech/pipedpiper/northwind:main
neon status -c https://neon:tech/pipedpiper/northwind:main zenith status -c https://zenith:tech/pipedpiper/northwind:main
neon commit -c https://neon:tech/pipedpiper/northwind:main zenith commit -c https://zenith:tech/pipedpiper/northwind:main
neon branch -c https://neon:tech/pipedpiper/northwind:<hash> experimental zenith branch -c https://zenith:tech/pipedpiper/northwind:<hash> experimental
``` ```
Pushing data into the cloud Pushing data into the cloud
```bash ```bash
-- pull all the commits from the cloud -- pull all the commits from the cloud
neon pull zenith pull
-- push all the commits to the cloud -- push all the commits to the cloud
neon push zenith push
``` ```

View File

@@ -1,14 +1,14 @@
# Repository format # Repository format
A Neon repository is similar to a traditional PostgreSQL backup A Zenith repository is similar to a traditional PostgreSQL backup
archive, like a WAL-G bucket or pgbarman backup catalogue. It holds archive, like a WAL-G bucket or pgbarman backup catalogue. It holds
multiple versions of a PostgreSQL database cluster. multiple versions of a PostgreSQL database cluster.
The distinguishing feature is that you can launch a Neon Postgres The distinguishing feature is that you can launch a Zenith Postgres
server directly against a branch in the repository, without having to server directly against a branch in the repository, without having to
"restore" it first. Also, Neon manages the storage automatically, "restore" it first. Also, Zenith manages the storage automatically,
there is no separation between full and incremental backups nor WAL there is no separation between full and incremental backups nor WAL
archive. Neon relies heavily on the WAL, and uses concepts similar archive. Zenith relies heavily on the WAL, and uses concepts similar
to incremental backups and WAL archiving internally, but it is hidden to incremental backups and WAL archiving internally, but it is hidden
from the user. from the user.
@@ -19,15 +19,15 @@ efficient. Just something to get us started.
The repository directory looks like this: The repository directory looks like this:
.neon/timelines/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c/wal/ .zenith/timelines/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c/wal/
.neon/timelines/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c/snapshots/<lsn>/ .zenith/timelines/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c/snapshots/<lsn>/
.neon/timelines/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c/history .zenith/timelines/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c/history
.neon/refs/branches/mybranch .zenith/refs/branches/mybranch
.neon/refs/tags/foo .zenith/refs/tags/foo
.neon/refs/tags/bar .zenith/refs/tags/bar
.neon/datadirs/<timeline uuid> .zenith/datadirs/<timeline uuid>
### Timelines ### Timelines
@@ -39,7 +39,7 @@ All WAL is generated on a timeline. You can launch a read-only node
against a tag or arbitrary LSN on a timeline, but in order to write, against a tag or arbitrary LSN on a timeline, but in order to write,
you need to create a timeline. you need to create a timeline.
Each timeline is stored in a directory under .neon/timelines. It Each timeline is stored in a directory under .zenith/timelines. It
consists of a WAL archive, containing all the WAL in the standard consists of a WAL archive, containing all the WAL in the standard
PostgreSQL format, under the wal/ subdirectory. PostgreSQL format, under the wal/ subdirectory.
@@ -66,18 +66,18 @@ contains the UUID of the timeline (and LSN, for tags).
### Datadirs ### Datadirs
.neon/datadirs contains PostgreSQL data directories. You can launch .zenith/datadirs contains PostgreSQL data directories. You can launch
a Postgres instance on one of them with: a Postgres instance on one of them with:
``` ```
postgres -D .neon/datadirs/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c postgres -D .zenith/datadirs/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c
``` ```
All the actual data is kept in the timeline directories, under All the actual data is kept in the timeline directories, under
.neon/timelines. The data directories are only needed for active .zenith/timelines. The data directories are only needed for active
PostgreSQL instances. After an instance is stopped, the data directory PostgreSQL instances. After an instance is stopped, the data directory
can be safely removed. "neon start" will recreate it quickly from can be safely removed. "zenith start" will recreate it quickly from
the data in .neon/timelines, if it's missing. the data in .zenith/timelines, if it's missing.
## Version 2 ## Version 2
@@ -103,14 +103,14 @@ more advanced. The exact format is TODO. But it should support:
### Garbage collection ### Garbage collection
When you run "neon gc", old timelines that are no longer needed are When you run "zenith gc", old timelines that are no longer needed are
removed. That involves collecting the list of "unreachable" objects, removed. That involves collecting the list of "unreachable" objects,
starting from the named branches and tags. starting from the named branches and tags.
Also, if enough WAL has been generated on a timeline since last Also, if enough WAL has been generated on a timeline since last
snapshot, a new snapshot or delta is created. snapshot, a new snapshot or delta is created.
### neon push/pull ### zenith push/pull
Compare the tags and branches on both servers, and copy missing ones. Compare the tags and branches on both servers, and copy missing ones.
For each branch, compare the timeline it points to in both servers. If For each branch, compare the timeline it points to in both servers. If
@@ -123,7 +123,7 @@ every time you start up an instance? Then you would detect that the
timelines have diverged. That would match with the "epoch" concept timelines have diverged. That would match with the "epoch" concept
that we have in the WAL safekeeper that we have in the WAL safekeeper
### neon checkout/commit ### zenith checkout/commit
In this format, there is no concept of a "working tree", and hence no In this format, there is no concept of a "working tree", and hence no
concept of checking out or committing. All modifications are done on concept of checking out or committing. All modifications are done on
@@ -134,7 +134,7 @@ You can easily fork off a temporary timeline to emulate a "working tree".
You can later remove it and have it garbage collected, or to "commit", You can later remove it and have it garbage collected, or to "commit",
re-point the branch to the new timeline. re-point the branch to the new timeline.
If we want to have a worktree and "neon checkout/commit" concept, we can If we want to have a worktree and "zenith checkout/commit" concept, we can
emulate that with a temporary timeline. Create the temporary timeline at emulate that with a temporary timeline. Create the temporary timeline at
"neon checkout", and have "neon commit" modify the branch to point to "zenith checkout", and have "zenith commit" modify the branch to point to
the new timeline. the new timeline.

View File

@@ -4,27 +4,27 @@ How it works now
1. Create repository, start page server on it 1. Create repository, start page server on it
``` ```
$ neon init $ zenith init
... ...
created main branch created main branch
new neon repository was created in .neon new zenith repository was created in .zenith
$ neon pageserver start $ zenith pageserver start
Starting pageserver at '127.0.0.1:64000' in .neon Starting pageserver at '127.0.0.1:64000' in .zenith
Page server started Page server started
``` ```
2. Create a branch, and start a Postgres instance on it 2. Create a branch, and start a Postgres instance on it
``` ```
$ neon branch heikki main $ zenith branch heikki main
branching at end of WAL: 0/15ECF68 branching at end of WAL: 0/15ECF68
$ neon pg create heikki $ zenith pg create heikki
Initializing Postgres on timeline 76cf9279915be7797095241638e64644... Initializing Postgres on timeline 76cf9279915be7797095241638e64644...
Extracting base backup to create postgres instance: path=.neon/pgdatadirs/pg1 port=55432 Extracting base backup to create postgres instance: path=.zenith/pgdatadirs/pg1 port=55432
$ neon pg start pg1 $ zenith pg start pg1
Starting postgres node at 'host=127.0.0.1 port=55432 user=heikki' Starting postgres node at 'host=127.0.0.1 port=55432 user=heikki'
waiting for server to start.... done waiting for server to start.... done
server started server started
@@ -52,20 +52,20 @@ serverless on your laptop, so that the workflow becomes just:
1. Create repository, start page server on it (same as before) 1. Create repository, start page server on it (same as before)
``` ```
$ neon init $ zenith init
... ...
created main branch created main branch
new neon repository was created in .neon new zenith repository was created in .zenith
$ neon pageserver start $ zenith pageserver start
Starting pageserver at '127.0.0.1:64000' in .neon Starting pageserver at '127.0.0.1:64000' in .zenith
Page server started Page server started
``` ```
2. Create branch 2. Create branch
``` ```
$ neon branch heikki main $ zenith branch heikki main
branching at end of WAL: 0/15ECF68 branching at end of WAL: 0/15ECF68
``` ```

View File

@@ -7,22 +7,22 @@ Here is a proposal about implementing push/pull mechanics between pageservers. W
The origin represents connection info for some remote pageserver. Let's use here same commands as git uses except using explicit list subcommand (git uses `origin -v` for that). The origin represents connection info for some remote pageserver. Let's use here same commands as git uses except using explicit list subcommand (git uses `origin -v` for that).
``` ```
neon origin add <name> <connection_uri> zenith origin add <name> <connection_uri>
neon origin list zenith origin list
neon origin remove <name> zenith origin remove <name>
``` ```
Connection URI is a string of the form `postgresql://user:pass@hostname:port` (https://www.postgresql.org/docs/13/libpq-connect.html#id-1.7.3.8.3.6). We can start with libpq password auth and later add support for client certs or require ssh as transport or invent some other kind of transport. Connection URI is a string of the form `postgresql://user:pass@hostname:port` (https://www.postgresql.org/docs/13/libpq-connect.html#id-1.7.3.8.3.6). We can start with libpq password auth and later add support for client certs or require ssh as transport or invent some other kind of transport.
Behind the scenes, these commands may update a toml file inside the .neon directory. Behind the scenes, these commands may update a toml file inside the .zenith directory.
## Push ## Push
### Pushing branch ### Pushing branch
``` ```
neon push mybranch cloudserver # push to eponymous branch in cloudserver zenith push mybranch cloudserver # push to eponymous branch in cloudserver
neon push mybranch cloudserver:otherbranch # push to a different branch in cloudserver zenith push mybranch cloudserver:otherbranch # push to a different branch in cloudserver
``` ```
Exact mechanics would be slightly different in the following situations: Exact mechanics would be slightly different in the following situations:

View File

@@ -2,7 +2,7 @@ While working on export/import commands, I understood that they fit really well
We may think about backups as snapshots in a different format (i.e plain pgdata format, basebackup tar format, WAL-G format (if they want to support it) and so on). They use same storage API, the only difference is the code that packs/unpacks files. We may think about backups as snapshots in a different format (i.e plain pgdata format, basebackup tar format, WAL-G format (if they want to support it) and so on). They use same storage API, the only difference is the code that packs/unpacks files.
Even if neon aims to maintain durability using its own snapshots, backups will be useful for uploading data from postgres to neon. Even if zenith aims to maintain durability using its own snapshots, backups will be useful for uploading data from postgres to zenith.
So here is an attempt to design consistent CLI for different usage scenarios: So here is an attempt to design consistent CLI for different usage scenarios:
@@ -16,8 +16,8 @@ Save`storage_dest` and other parameters in config.
Push snapshots to `storage_dest` in background. Push snapshots to `storage_dest` in background.
``` ```
neon init --storage_dest=S3_PREFIX zenith init --storage_dest=S3_PREFIX
neon start zenith start
``` ```
#### 2. Restart pageserver (manually or crash-recovery). #### 2. Restart pageserver (manually or crash-recovery).
@@ -25,7 +25,7 @@ Take `storage_dest` from pageserver config, start pageserver from latest snapsho
Push snapshots to `storage_dest` in background. Push snapshots to `storage_dest` in background.
``` ```
neon start zenith start
``` ```
#### 3. Import. #### 3. Import.
@@ -35,22 +35,22 @@ Do not save `snapshot_path` and `snapshot_format` in config, as it is a one-time
Save `storage_dest` parameters in config. Save `storage_dest` parameters in config.
Push snapshots to `storage_dest` in background. Push snapshots to `storage_dest` in background.
``` ```
//I.e. we want to start neon on top of existing $PGDATA and use s3 as a persistent storage. //I.e. we want to start zenith on top of existing $PGDATA and use s3 as a persistent storage.
neon init --snapshot_path=FILE_PREFIX --snapshot_format=pgdata --storage_dest=S3_PREFIX zenith init --snapshot_path=FILE_PREFIX --snapshot_format=pgdata --storage_dest=S3_PREFIX
neon start zenith start
``` ```
How to pass credentials needed for `snapshot_path`? How to pass credentials needed for `snapshot_path`?
#### 4. Export. #### 4. Export.
Manually push snapshot to `snapshot_path` which differs from `storage_dest` Manually push snapshot to `snapshot_path` which differs from `storage_dest`
Optionally set `snapshot_format`, which can be plain pgdata format or neon format. Optionally set `snapshot_format`, which can be plain pgdata format or zenith format.
``` ```
neon export --snapshot_path=FILE_PREFIX --snapshot_format=pgdata zenith export --snapshot_path=FILE_PREFIX --snapshot_format=pgdata
``` ```
#### Notes and questions #### Notes and questions
- safekeeper s3_offload should use same (similar) syntax for storage. How to set it in UI? - safekeeper s3_offload should use same (similar) syntax for storage. How to set it in UI?
- Why do we need `neon init` as a separate command? Can't we init everything at first start? - Why do we need `zenith init` as a separate command? Can't we init everything at first start?
- We can think of better names for all options. - We can think of better names for all options.
- Export to plain postgres format will be useless, if we are not 100% compatible on page level. - Export to plain postgres format will be useless, if we are not 100% compatible on page level.
I can recall at least one such difference - PD_WAL_LOGGED flag in pages. I can recall at least one such difference - PD_WAL_LOGGED flag in pages.

View File

@@ -9,7 +9,7 @@ receival and this might lag behind `term`; safekeeper switches to epoch `n` when
it has received all committed log records from all `< n` terms. This roughly it has received all committed log records from all `< n` terms. This roughly
corresponds to proposed in corresponds to proposed in
https://github.com/neondatabase/rfcs/pull/3/files https://github.com/zenithdb/rfcs/pull/3/files
This is our biggest difference from Raft. In Raft, every log record is This is our biggest difference from Raft. In Raft, every log record is

View File

@@ -1,6 +1,6 @@
# Safekeeper gossip # Safekeeper gossip
Extracted from this [PR](https://github.com/neondatabase/rfcs/pull/13) Extracted from this [PR](https://github.com/zenithdb/rfcs/pull/13)
## Motivation ## Motivation

View File

@@ -2,7 +2,7 @@
Created on 19.01.22 Created on 19.01.22
Initially created [here](https://github.com/neondatabase/rfcs/pull/16) by @kelvich. Initially created [here](https://github.com/zenithdb/rfcs/pull/16) by @kelvich.
It is an alternative to (014-safekeeper-gossip)[]
@@ -292,4 +292,4 @@ But with an etcd we are in a bit different situation:
1. We don't need persistency and strong consistency guarantees for the data we store in the etcd 1. We don't need persistency and strong consistency guarantees for the data we store in the etcd
2. etcd uses Grpc as a protocol, and messages are pretty simple 2. etcd uses Grpc as a protocol, and messages are pretty simple
So it looks like implementing in-mem store with etcd interface is straightforward thing _if we will want that in future_. At the same time, we can avoid implementing it right now, and we will be able to run local neon installation with etcd running somewhere in the background (as opposed to building and running console, which in turn requires Postgres). So it looks like implementing in-mem store with etcd interface is straightforward thing _if we will want that in future_. At the same time, we can avoid implementing it right now, and we will be able to run local zenith installation with etcd running somewhere in the background (as opposed to building and running console, which in turn requires Postgres).

View File

@@ -1,420 +0,0 @@
# Splitting cloud console
Created on 17.06.2022
## Summary
Currently we have a `cloud` repository that contains both the code implementing the public API for our clients and the code for managing storage and internal infrastructure services. We can split everything user-related from everything storage-related to make it easier to test and maintain.
This RFC proposes to introduce a new control-plane service with HTTP API. The overall architecture will look like this:
```markup
. x
external area x internal area
(our clients) x (our services)
x
x ┌───────────────────────┐
x ┌───────────────┐ > ┌─────────────────────┐ │ Storage (EC2) │
x │ console db │ > │ control-plane db │ │ │
x └───────────────┘ > └─────────────────────┘ │ - safekeepers │
x ▲ > ▲ │ - pageservers │
x │ > │ │ │
┌──────────────────┐ x ┌───────┴───────┐ > │ │ Dependencies │
│ browser UI ├──►│ │ > ┌──────────┴──────────┐ │ │
└──────────────────┘ x │ │ > │ │ │ - etcd │
x │ console ├───────►│ control-plane ├────►│ - S3 │
┌──────────────────┐ x │ │ > │ (deployed in k8s) │ │ - more? │
│public API clients├──►│ │ > │ │ │ │
└──────────────────┘ x └───────┬───────┘ > └──────────┬──────────┘ └───────────────────────┘
x │ > ▲ │ ▲
x │ > │ │ │
x ┌───────┴───────┐ > │ │ ┌───────────┴───────────┐
x │ dependencies │ > │ │ │ │
x │- analytics │ > │ └───────────────►│ computes │
x │- auth │ > │ │ (deployed in k8s) │
x │- billing │ > │ │ │
x └───────────────┘ > │ └───────────────────────┘
x > │ ▲
x > ┌─────┴───────────────┐ │
┌──────────────────┐ x > │ │ │
│ │ x > │ proxy ├─────────────────┘
│ postgres ├───────────────────────────►│ (deployed in k8s) │
│ users │ x > │ │
│ │ x > └─────────────────────┘
└──────────────────┘ x >
>
>
closed-source > open-source
>
>
```
Notes:
- diagram is simplified in the less-important places
- directed arrows are strict and mean that connections in the reverse direction are forbidden
This split is quite complex and this RFC proposes several smaller steps to achieve the larger goal:
1. Start by refactoring the console code; the goal is to have console and control-plane code in different directories without dependencies on each other.
2. Do a similar refactoring for tables in the console database: remove queries that select data from both console and control-plane tables, and move the control-plane tables to a separate database.
3. Implement control-plane HTTP API serving on a separate TCP port; make all console→control-plane calls to go through that HTTP API.
4. Move control-plane source code to the neon repo; start control-plane as a separate service.
## Motivation
These are the two most important problems we want to solve:
- Publish open-source implementation of all our cloud/storage features
- Make a unified control-plane that is used in all cloud (serverless) and local (tests) setups
Right now we have some closed-source code in the cloud repo. That code contains the implementation for running Neon computes in k8s, and without it it's impossible to automatically scale PostgreSQL computes. That means that we don't have an open-source serverless PostgreSQL at the moment.
After splitting out and open-sourcing the control-plane service we will have source code and Docker images for all storage services. That control-plane service should have an HTTP API for creating and managing tenants (including all our storage features), while the proxy will listen for incoming connections and create computes on demand.
Improving our test suite is an important task, but it requires a lot of prerequisites and may deserve a separate RFC. A possible implementation is described in the section [Next steps](#next-steps).
Another piece of motivation is better involvement of the storage development team in the control-plane. By splitting the control-plane from the console, it becomes more convenient to test and develop the control-plane while paying less attention to "business" features, such as user management, billing and analytics.
For example, the console currently requires authentication providers such as GitHub OAuth to work at all, as well as nodejs to build it locally. It will be more convenient to build and run it locally without these requirements.
## Proposed implementation
### Current state of things
Let's start by defining the current state of things at the moment of this proposal. We have three repositories containing source code:
- open-source `postgres` — our fork of postgres
- open-source `neon` — our main repository for storage source code
- closed-source `cloud` — mostly console backend and UI frontend
This proposal does not aim to change any existing code in the `neon` and `postgres` repositories, but to create the control-plane service and move its source code from `cloud` to the `neon` repository. That means we only need to split code in the `cloud` repo, so it is the only repository whose source code we explore here.
Let's look at the miscellaneous things in the `cloud` repo which are NOT part of the console application, i.e. NOT the Go source code that is compiled into the `./console` binary. There we have:
- command-line tools, such as cloudbench, neonadmin
- markdown documentation
- cloud operations scripts (helm, terraform, ansible)
- configs and other things
- e2e python tests
- incidents playbooks
- UI frontend
- Make build scripts, code generation scripts
- database migrations
- swagger definitions
And let's also take a look at what we have in the console source code, which is the service we'd like to split:
- API Servers
- Public API v2
- Management API v2
- Public API v1
- Admin API v1 (same port as Public API v1)
- Management API v1
- Workers
- Monitor Compute Activity
- Watch Failed Operations
- Availability Checker
- Business Metrics Collector
- Internal Services
- Auth Middleware, UserIsAdmin, Cookies
- Cable Websocket Server
- Admin Services
- Global Settings, Operations, Pageservers, Platforms, Projects, Safekeepers, Users
- Authenticate Proxy
- API Keys
- App Controller, serving UI HTML
- Auth Controller
- Branches
- Projects
- Psql Connect + Passwordless login
- Users
- Cloud Metrics
- User Metrics
- Invites
- Pageserver/Safekeeper management
- Operations, k8s/docker/common logic
- Platforms, Regions
- Project State
- Projects Roles, SCRAM
- Global Settings
- Other things
- segment analytics integration
- sentry integration
- other common utilities packages
### Drawing the splitting line
The most challenging and most important thing is to define the line that will split the new control-plane service from the existing cloud service. If we don't get it right, we can end up with a lot more issues and few benefits.
We propose to define that line as follows:
- everything user-related stays in the console service
- everything storage-related should be in the control-plane service
- anything that falls in between should be decided case by case, but most likely stays in the console service
- some similar parts should be in both services, such as admin/management/db_migrations
We call "user-related" all requests that can be connected to some user. The general idea is not to have any user_id in the control-plane service and to operate exclusively on tenant_id+timeline_id, the same way the existing storage services work now (compute, safekeeper, pageserver).
Storage-related things can be defined as doing any of the following:
- using k8s API
- doing requests to any of the storage services (proxy, compute, safekeeper, pageserver, etc..)
- tracking current status of tenants/timelines, managing lifetime of computes
Based on that idea, we can say that the new control-plane service should have the following components:
- single HTTP API for everything
- Create and manage tenants and timelines
- Manage global settings and storage configuration (regions, platforms, safekeepers, pageservers)
- Admin API for storage health inspection and debugging
- Workers
- Monitor Compute Activity
- Watch Failed Operations
- Availability Checker
- Internal Services
- Admin Services
- Global Settings, Operations, Pageservers, Platforms, Tenants, Safekeepers
- Authenticate Proxy
- Branches
- Psql Connect
- Cloud Metrics
- Pageserver/Safekeeper management
- Operations, k8s/docker/common logic
- Platforms, Regions
- Tenant State
- Compute Roles, SCRAM
- Global Settings
---
And other components should probably stay in the console service:
- API Servers (no changes here)
- Public API v2
- Management API v2
- Public API v1
- Admin API v1 (same port as Public API v1)
- Management API v1
- Workers
- Business Metrics Collector
- Internal Services
- Auth Middleware, UserIsAdmin, Cookies
- Cable Websocket Server
- Admin Services
- Users admin stays the same
- Other admin services can redirect requests to the control-plane
- API Keys
- App Controller, serving UI HTML
- Auth Controller
- Projects
- User Metrics
- Invites
- Users
- Passwordless login
- Other things
- segment analytics integration
- sentry integration
- other common utilities packages
There are also miscellaneous things that are useful for all kinds of services. So we can say that these things can be in both services:
- markdown documentation
- e2e python tests
- make build scripts, code generation scripts
- database migrations
- swagger definitions
The single entrypoint to the storage should be the control-plane API. After we define that API, we can have code-generated implementations for the client and for the server. The general idea is to move the code implementing storage components from the console to the API implementation inside the new control-plane service.
After the code is moved to the new service, we can fill the created void by making API calls to the new service (see the sketch after this list):
- authorization of the client
- mapping user_id + project_id to the tenant_id
- calling the control-plane API
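To make that boundary concrete, here is a minimal sketch of what such a console-side call could look like, assuming the plain HTTP control-plane API proposed below and a shared bearer token; the `ControlPlaneClient` type, its field names and the base URL are illustrative assumptions, not existing code:

```go
package console

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
)

// Tenant mirrors the subset of the control-plane response the console cares about.
type Tenant struct {
	TenantID string `json:"tenant_id"`
	State    string `json:"state"`
}

// ControlPlaneClient wraps plain HTTP calls to the internal control-plane service.
type ControlPlaneClient struct {
	BaseURL string // e.g. "http://control-plane.internal:1234" (illustrative)
	Token   string // shared private key or per-tenant token
	HTTP    *http.Client
}

// GetTenant is called after the console has authorized the user and mapped
// user_id + project_id to a tenant_id; only the tenant_id crosses the boundary.
func (c *ControlPlaneClient) GetTenant(ctx context.Context, tenantID string) (*Tenant, error) {
	url := fmt.Sprintf("%s/tenants/%s", c.BaseURL, tenantID)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+c.Token)

	resp, err := c.HTTP.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("control-plane returned %s", resp.Status)
	}

	var t Tenant
	if err := json.NewDecoder(resp.Body).Decode(&t); err != nil {
		return nil, err
	}
	return &t, nil
}
```

Notice that no user_id appears anywhere in this client, which matches the splitting line defined above.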
### control-plane API
Currently we have the following projects API in the console:
```
GET /projects/{project_id}
PATCH /projects/{project_id}
POST /projects/{project_id}/branches
GET /projects/{project_id}/databases
POST /projects/{project_id}/databases
GET /projects/{project_id}/databases/{database_id}
PUT /projects/{project_id}/databases/{database_id}
DELETE /projects/{project_id}/databases/{database_id}
POST /projects/{project_id}/delete
GET /projects/{project_id}/issue_token
GET /projects/{project_id}/operations
GET /projects/{project_id}/operations/{operation_id}
POST /projects/{project_id}/query
GET /projects/{project_id}/roles
POST /projects/{project_id}/roles
GET /projects/{project_id}/roles/{role_name}
DELETE /projects/{project_id}/roles/{role_name}
POST /projects/{project_id}/roles/{role_name}/reset_password
POST /projects/{project_id}/start
POST /projects/{project_id}/stop
POST /psql_session/{psql_session_id}
```
It looks fine and we probably already have clients relying on it, so we should not change it, at least for now. But most of these endpoints (if not all) are related to storage, and that suggests what the control-plane API should look like:
```
GET /tenants/{tenant_id}
PATCH /tenants/{tenant_id}
POST /tenants/{tenant_id}/branches
GET /tenants/{tenant_id}/databases
POST /tenants/{tenant_id}/databases
GET /tenants/{tenant_id}/databases/{database_id}
PUT /tenants/{tenant_id}/databases/{database_id}
DELETE /tenants/{tenant_id}/databases/{database_id}
POST /tenants/{tenant_id}/delete
GET /tenants/{tenant_id}/issue_token
GET /tenants/{tenant_id}/operations
GET /tenants/{tenant_id}/operations/{operation_id}
POST /tenants/{tenant_id}/query
GET /tenants/{tenant_id}/roles
POST /tenants/{tenant_id}/roles
GET /tenants/{tenant_id}/roles/{role_name}
DELETE /tenants/{tenant_id}/roles/{role_name}
POST /tenants/{tenant_id}/roles/{role_name}/reset_password
POST /tenants/{tenant_id}/start
POST /tenants/{tenant_id}/stop
POST /psql_session/{psql_session_id}
```
One of the options here is to use gRPC instead of HTTP, which has some useful features, but there are strong arguments for using plain HTTP:
- HTTP API is easier to use for the clients
- we already have HTTP API in pageserver/safekeeper/console
- we probably want the control-plane API to be similar to the console API available in the cloud
### Getting updates from the storage
There are valid cases where we would like to know what has changed in the storage. For example, the console might want to know when a user's query started a compute and when that compute was later scaled to zero, in order to know how much the user should pay for the service. Another example is getting information about reaching disk space limits. Yet another example is analytics, such as how many users had at least one active project in a month.
All of the above cases can happen without using the console, just by accessing compute through the proxy.
To solve this, we can have a log of events occurring in the storage (event logs). That is very similar to operations table we have right now, the only difference is that events are immutable and we cannot change them after saving to the database. For example, we might want to have events for the following activities:
- We finished processing some HTTP API query, such as resetting the password
- We changed some state, such as starting or stopping a compute
- An operation is created
- An operation starts for the first time
- An operation fails for the first time
- An operation finishes
Once these events are saved to the database, we can expose an HTTP API to subscribe to them. That API can look like this:
```
GET /events/<cursor>
{
"events": [...],
"next_cursor": 123
}
```
It should be possible to replay the event log from some point in time to reconstruct the state of almost anything in the storage services. That means that if we maintain some state in the control-plane database and need the same state in the console database, we can mirror it by polling events from the control-plane API and updating the console database accordingly, as sketched below.
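A minimal sketch of such a console-side consumer, assuming the `/events/<cursor>` endpoint shown above; cursor persistence and error handling are deliberately simplified:
```
import time

import requests

CONTROL_PLANE_URL = "http://control-plane.internal:8080"  # hypothetical address


def apply_to_console_db(event: dict) -> None:
    # Placeholder: here the console would update its own tables,
    # e.g. billing counters or an operations view.
    print("applying event:", event)


def poll_events(cursor: int = 0) -> None:
    while True:
        resp = requests.get(f"{CONTROL_PLANE_URL}/events/{cursor}")
        resp.raise_for_status()
        payload = resp.json()
        for event in payload["events"]:
            apply_to_console_db(event)
        # The cursor should be persisted so that replay can resume after a restart.
        cursor = payload["next_cursor"]
        time.sleep(1)
```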
### Next steps
After implementing the control-plane HTTP API and running control-plane as a separate service, we might want to exploit the benefits of the new architecture, such as reorganizing the test infrastructure. Possible options are listed in [Next steps](#next-steps-1).
## Non Goals
This RFC doesn't cover the actual cloud deployment scripts and schemas, such as terraform, ansible, k8s yamls and so on.
## Impacted components
Mostly the console, but it can also affect some storage services.
## Scalability
We should support running several instances of the new control-plane service at the same time.
It should also be possible to run only a single instance of control-plane, which is useful for local tests.
## Security implications
The new control-plane service is an internal service, so no external requests can reach it. At the same time, it contains an API that can do absolutely anything with any of the tenants, which means a bad internal actor could potentially read and write all of the tenants. To make this safer, we can do one of the following (a sketch of the second option follows the list):
- A simple option is to protect all requests with a single private key, so that no one can make requests without having that one key.
- Another option is to have a separate token for every tenant and store these tokens in another secure place. This way it's harder to access all tenants at once, because every tenant has a different token.
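A minimal sketch of the per-tenant token check, assuming tokens are opaque secrets kept in a separate secret store (represented here by a plain dict for illustration):
```
import hmac

# Stand-in for a secure secret store: tenant_id -> token (illustrative values).
TENANT_TOKENS = {"0f14d0ab9605e4f0aabf4a39a18a4b28": "s3cr3t-token-for-this-tenant"}


def authorize_request(tenant_id: str, presented_token: str) -> bool:
    expected = TENANT_TOKENS.get(tenant_id)
    if expected is None:
        return False
    # Constant-time comparison to avoid leaking the token through timing.
    return hmac.compare_digest(expected, presented_token)
```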
## Alternative implementation
There was an idea to create a k8s operator for managing storage services and computes, but the author of this RFC is not really familiar with that approach.
As for less drastic alternatives, there are other options for the name of the new control-plane service:
- storage-ctl
- cloud
- cloud-ctl
## Pros/cons of proposed approaches (TODO)
Pros:
- All storage features are completely open-source
- Better test coverage, less difference between cloud and local setups
- Easier to develop storage and cloud features, because there is no need to set up the console for that
- Easier to deploy storage-only services to any cloud
Cons:
- All storage features are completely open-source
- Distributed services mean more code to connect different services and potential network issues
- The console needs a dependency on the storage API; there can be complications when developing a new feature in a branch
- More code to JOIN data from different services (console and control-plane)
## Definition of Done
We have a new control-plane service running in k8s. The source code for that service is located in the open-source neon repo.
## Next steps
After we've reached the DoD, we can make further improvements.
The first thing that can benefit from the split is local testing. The same control-plane service can start computes as local processes instead of k8s deployments. If it also supports starting pageservers/safekeepers/proxy for the local setup, it can completely replace the `./neon_local` binary, which is currently used for testing. The local testing environment can look like this:
```
┌─────────────────────┐ ┌───────────────────────┐
│ │ │ Storage (local) │
│ control-plane db │ │ │
│ (local process) │ │ - safekeepers │
│ │ │ - pageservers │
└──────────▲──────────┘ │ │
│ │ Dependencies │
┌──────────┴──────────┐ │ │
│ │ │ - etcd │
│ control-plane ├────►│ - S3 │
│ (local process) │ │ - more? │
│ │ │ │
└──────────┬──────────┘ └───────────────────────┘
▲ │ ▲
│ │ │
│ │ ┌───────────┴───────────┐
│ │ │ │
│ └───────────────►│ computes │
│ │ (local processes) │
│ │ │
┌──────┴──────────────┐ └───────────────────────┘
│ │ ▲
│ proxy │ │
│ (local process) ├─────────────────┘
│ │
└─────────────────────┘
```
The key thing here is that the local control-plane service has the same API and almost the same implementation as the one deployed in k8s. This allows running the same e2e tests against both cloud and local setups.
For the python test_runner tests everything can stay mostly the same; we just need to replace `./neon_local` cli commands with API calls to the control-plane.
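A sketch of what such a fixture could look like; the tenant-creation endpoint is an assumption, since the API above only lists per-tenant routes:
```
import pytest
import requests

CONTROL_PLANE_URL = "http://127.0.0.1:8080"  # local control-plane process


@pytest.fixture
def tenant_id() -> str:
    # Previously something like `neon_local tenant create`; now a plain API call.
    resp = requests.post(f"{CONTROL_PLANE_URL}/tenants")
    resp.raise_for_status()
    return resp.json()["tenant_id"]
```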
The benefit here is fast local tests that are really close to our cloud setup. Bugs in k8s queries still cannot be found when running computes as local processes, but it should be easy to start k8s locally (for example with k3s) and run the same tests with the control-plane connected to the local k8s.
As for console and UI tests, after the split there should be a way to run them without spinning up all the storage locally. The new control-plane service has a well-defined API, which allows us to mock it. This way we can create UI tests that verify the right calls are issued after specific UI interactions and that we render correct messages when the API returns errors.
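A minimal sketch of such a mock, serving a canned response for the `/tenants/{tenant_id}` route proposed above; the payload shape is illustrative:
```
import json
from http.server import BaseHTTPRequestHandler, HTTPServer

# Illustrative canned payload; real tests would tailor this per scenario.
CANNED_TENANT = {"tenant_id": "0f14d0ab9605e4f0aabf4a39a18a4b28", "state": "active"}


class MockControlPlane(BaseHTTPRequestHandler):
    def do_GET(self):
        if self.path.startswith("/tenants/"):
            body = json.dumps(CANNED_TENANT).encode()
            self.send_response(200)
            self.send_header("Content-Type", "application/json")
            self.end_headers()
            self.wfile.write(body)
        else:
            self.send_response(404)
            self.end_headers()


if __name__ == "__main__":
    # UI/console tests point their control-plane URL at this server.
    HTTPServer(("127.0.0.1", 8899), MockControlPlane).serve_forever()
```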

View File

@@ -78,7 +78,7 @@ with grpc streams and tokio mpsc channels. The implementation description is at
It is just 500 lines of code and core functionality is complete. 1-1 pub sub
gives about 120k received messages per second; having multiple subscribers in
different connections quickly scales to 1 million received messages per second.
I had concerns about many concurrent streams in a single connection, but 2^20
subscribers still work (though they eat memory; with 10 publishers 20GB are consumed;
in this implementation each publisher holds a full copy of all subscribers). There
@@ -95,12 +95,12 @@ other members, with best-effort this is simple.
### Security implications
Communication happens in a private network that is not exposed to users;
additionally we can add auth to the broker.
## Alternative: get existing pub-sub
We could take some existing pub sub solution, e.g. RabbitMQ, Redis. But in this
case IMV simplicity of our own outweighs external dependency costs (RabbitMQ is
much more complicated and needs a VM; Redis Rust client maintenance is not
ideal...). Also note that projects like CockroachDB and TiDB are based on gRPC
as well.

View File

@@ -74,7 +74,7 @@ TenantMaintenanceGuard: Like ActiveTenantGuard, but can be held even when the
tenant is not in Active state. Used for operations like attach/detach. Perhaps
allow only one such guard on a Tenant at a time.
Similarly for Timelines. We don't currently have a "state" on Timeline, but I think
we need at least two states: Active and Stopping. The Stopping state is used at
deletion, to prevent new TimelineActiveGuards from appearing, while you wait for
existing TimelineActiveGuards to die out.
@@ -85,7 +85,7 @@ have a TenantActiveGuard, and the tenant's state changes from Active to
Stopping, the is_shutdown_requested() function should return true, and
shutdown_watcher() future should return.
This signaling doesn't necessarily need to cover all cases. For example, if you
have a block of code in spawn_blocking(), it might be acceptable if
is_shutdown_requested() doesn't return true even though the tenant is in
Stopping state, as long as the code finishes reasonably fast.

View File

@@ -37,7 +37,7 @@ sequenceDiagram
```
At this point it is not possible to restore from index, it contains L2 which
is no longer available in s3 and doesn't contain L3 added by compaction by the
first pageserver. So if any of the pageservers restart, initial sync will fail
(or in the on-demand world it will fail a bit later during a page request from
the missing layer)
@@ -74,7 +74,7 @@ One possible solution for relocation case is to orchestrate background jobs
from outside. The oracle who runs migration can turn off background jobs on
PS1 before migration and then run migration -> enable them on PS2. The problem
comes if migration fails. In this case in order to resume background jobs
oracle needs to guarantee that PS2 doesn't run background jobs and if it doesn't
respond then PS1 is stuck unable to run compaction/gc. This cannot be solved
without a human ensuring that no upload from PS2 can happen. In order to be able
to resolve this automatically CAS is required on the S3 side so pageserver can
@@ -128,7 +128,7 @@ During discussion it seems that we converged on the approach consisting of:
whether we need to apply change to the index state or not.
- Responsibility for running background jobs is assigned externally. Pageserver
keeps locally persistent flag for each tenant that indicates whether this
pageserver is considered as primary one or not. TODO what happens if we
crash and cannot start for some extended period of time? Control plane can
assign ownership to some other pageserver. Pageserver needs some way to check
if it's still the blessed one. Maybe by explicit request to control plane on
@@ -138,7 +138,7 @@ Requirement for deterministic layer generation was considered overly strict
because of two reasons:
- It can limit possible optimizations, e.g. when pageserver wants to reshuffle
some data locally and doesn't want to coordinate this
- The deterministic algorithm itself can change, so during deployments for some
time there will be two different versions running at the same time, which can
cause non-determinism
@@ -164,7 +164,7 @@ sequenceDiagram
CP->>PS1: Yes
deactivate CP
PS1->>S3: Fetch PS1 index.
note over PS1: Continue operations, start background jobs
note over PS1,PS2: PS1 starts up and still is not a leader anymore
PS1->>CP: Am I still the leader for Tenant X?
CP->>PS1: No
@@ -203,7 +203,7 @@ sequenceDiagram
### Eviction
When two pageservers operate on a tenant for an extended period of time the follower
doesn't perform write operations in s3. When a layer is evicted the follower relies
on updates from the primary to get info about layers it needs to cover the range for
the evicted layer.

View File

@@ -4,7 +4,7 @@ Created on 08.03.23
## Motivation
Currently we don't delete the pageserver part of the data from s3 when a project is deleted. (The same is true for safekeepers, but this is outside of the scope of this RFC).
This RFC aims to spin a discussion to come to a robust deletion solution that won't put us into a corner for features like postponed deletion (when we keep data for the user to be able to restore a project if it was deleted by accident)
@@ -75,9 +75,9 @@ Remote one is needed for cases when pageserver is lost during deletion so other
Why is a local mark file needed?
If we don't have one, we have two choices: delete local data before deleting the remote part, or do that after.
If we delete local data before remote then during restart the pageserver won't pick up the remote tenant at all because nothing is available locally (pageserver looks for remote counterparts of locally available tenants).
If we delete local data after remote, then at the end of the sequence, when the remote mark file is deleted, a pageserver restart leaves the state the same as if the pageserver were just missing data on remote without knowing that this data is intended to be deleted. In this case the current behavior is to upload everything local-only to remote.
@@ -145,7 +145,7 @@ sequenceDiagram
CP->>PS: Retry delete tenant
PS->>CP: Not modified
else Mark is missing
note over PS: Continue to operate the tenant as if deletion didn't happen
note over CP: Eventually console should <br> retry delete request
@@ -168,7 +168,7 @@ sequenceDiagram
PS->>CP: True
```
A similar sequence applies when both local and remote marks were persisted but Control Plane still didn't receive a response.
If the pageserver crashes after both mark files were deleted then it will reply to the control plane status poll request with 404, which should be treated by control plane as success.
@@ -187,7 +187,7 @@ If pageseserver is lost then the deleted tenant should be attached to different
##### Restrictions for tenant that is in progress of being deleted
I propose to add another state to tenant/timeline - PendingDelete. This state shouldn't allow executing any operations aside from polling the deletion status.
#### Summary
@@ -237,7 +237,7 @@ New branch gets created
PS1 starts up (is it possible or we just recycle it?)
PS1 is unaware of the new branch. It can either fall back to s3 ls, or ask control plane.
So here comes the dependency of storage on control plane. During restart storage needs to know which timelines are valid for operation. If there is nothing on s3 that can answer that question, storage needs to ask control plane.
### Summary
@@ -250,7 +250,7 @@ Cons:
Pros:
- Easier to reason about if you don't have to account for pageserver restarts
### Extra notes
@@ -262,7 +262,7 @@ Delayed deletion can be done with both approaches. As discussed with Anna (@step
After discussion in comments I see that we settled on two options (though a bit different from the ones described in the rfc). The first one is the same - pageserver owns as much as possible. The second option is that pageserver owns the markers, but actual deletion happens in control plane by repeatedly calling ls + delete.
To my mind the only benefit of the latter approach is possible code reuse between safekeepers and pageservers. Otherwise, integrating an s3 library into control plane and configuring shared knowledge about paths in s3 are the downsides. Another downside of relying on control plane is the testing process. Control plane resides in a different repository, so it is quite hard to test pageserver-related changes there. The e2e test suite there doesn't support shutting down pageservers, which are separate docker containers there instead of just processes.
With pageserver owning everything we still give the retry logic to control plane, but it's easier to duplicate if needed compared to sharing the inner s3 workings. We will have the needed tests for retry logic in the neon repo.

View File

@@ -75,7 +75,7 @@ sequenceDiagram
```
At this point it is not possible to restore the state from index, it contains L2 which
is no longer available in s3 and doesn't contain L3 added by compaction by the
first pageserver. So if any of the pageservers restart, initial sync will fail
(or in the on-demand world it will fail a bit later during a page request from
the missing layer)
@@ -171,7 +171,7 @@ sequenceDiagram
Another problem is a possibility of concurrent branch creation calls.
I.e. during migration create_branch can be called on the old pageserver and the newly created branch won't be seen on the new pageserver. Prior art includes prototyping an approach of trying to mirror such branches, but currently it lost its importance, because now attach is fast because we don't need to download all data, and additionally to the best of my knowledge of control plane internals (cc @ololobus to confirm) operations on one project are executed sequentially, so it is not possible to have such a case. So the branch create operation will be executed only when relocation is completed. As a safety measure we can forbid branch creation for tenants that are in the readonly remote state.
## Simplistic approach

View File

@@ -55,7 +55,7 @@ When PostgreSQL requests a file, `compute_ctl` downloads it.
PostgreSQL requests files in the following cases:
- When loading a preload library set in `local_preload_libraries`
- When explicitly loading a library with `LOAD`
- When creating an extension with `CREATE EXTENSION` (download sql scripts, (optional) extension data files and (optional) library files)
#### Summary

Some files were not shown because too many files have changed in this diff