Compare commits

..

1 Commits

Author SHA1 Message Date
Anna Khanova
136ed19387 Test 2024-03-27 13:42:33 +01:00
316 changed files with 9503 additions and 21746 deletions

View File

@@ -22,7 +22,6 @@
!s3_scrubber/ !s3_scrubber/
!safekeeper/ !safekeeper/
!storage_broker/ !storage_broker/
!storage_controller/
!trace/ !trace/
!vendor/postgres-*/ !vendor/postgres-*/
!workspace_hack/ !workspace_hack/

View File

@@ -150,7 +150,7 @@ runs:
# Use aws s3 cp (instead of aws s3 sync) to keep files from previous runs to make old URLs work, # Use aws s3 cp (instead of aws s3 sync) to keep files from previous runs to make old URLs work,
# and to keep files on the host to upload them to the database # and to keep files on the host to upload them to the database
time s5cmd --log error cp "${WORKDIR}/report/*" "s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}/" time aws s3 cp --recursive --only-show-errors "${WORKDIR}/report" "s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}"
# Generate redirect # Generate redirect
cat <<EOF > ${WORKDIR}/index.html cat <<EOF > ${WORKDIR}/index.html

View File

@@ -10,7 +10,7 @@ inputs:
required: true required: true
api_host: api_host:
desctiption: 'Neon API host' desctiption: 'Neon API host'
default: console-stage.neon.build default: console.stage.neon.tech
outputs: outputs:
dsn: dsn:
description: 'Created Branch DSN (for main database)' description: 'Created Branch DSN (for main database)'

View File

@@ -13,7 +13,7 @@ inputs:
required: true required: true
api_host: api_host:
desctiption: 'Neon API host' desctiption: 'Neon API host'
default: console-stage.neon.build default: console.stage.neon.tech
runs: runs:
using: "composite" using: "composite"

View File

@@ -13,7 +13,7 @@ inputs:
default: 15 default: 15
api_host: api_host:
desctiption: 'Neon API host' desctiption: 'Neon API host'
default: console-stage.neon.build default: console.stage.neon.tech
provisioner: provisioner:
desctiption: 'k8s-pod or k8s-neonvm' desctiption: 'k8s-pod or k8s-neonvm'
default: 'k8s-pod' default: 'k8s-pod'

View File

@@ -10,7 +10,7 @@ inputs:
required: true required: true
api_host: api_host:
desctiption: 'Neon API host' desctiption: 'Neon API host'
default: console-stage.neon.build default: console.stage.neon.tech
runs: runs:
using: "composite" using: "composite"

View File

@@ -18,7 +18,6 @@ on:
concurrency: concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number }} group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: false
env: env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -147,16 +147,15 @@ jobs:
"neonvm-captest-new" "neonvm-captest-new"
], ],
"db_size": [ "10gb" ], "db_size": [ "10gb" ],
"include": [{ "platform": "neon-captest-freetier", "db_size": "3gb" }, "include": [{ "platform": "neon-captest-freetier", "db_size": "3gb" },
{ "platform": "neon-captest-new", "db_size": "50gb" }, { "platform": "neon-captest-new", "db_size": "50gb" },
{ "platform": "neonvm-captest-freetier", "db_size": "3gb" }, { "platform": "neonvm-captest-freetier", "db_size": "3gb" },
{ "platform": "neonvm-captest-new", "db_size": "50gb" }, { "platform": "neonvm-captest-new", "db_size": "50gb" }]
{ "platform": "neonvm-captest-sharding-reuse", "db_size": "50gb" }]
}' }'
if [ "$(date +%A)" = "Saturday" ]; then if [ "$(date +%A)" = "Saturday" ]; then
matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "db_size": "10gb"}, matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "db_size": "10gb"},
{ "platform": "rds-aurora", "db_size": "50gb"}]') { "platform": "rds-aurora", "db_size": "50gb"}]')
fi fi
echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
@@ -172,7 +171,7 @@ jobs:
if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres" }, matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres" },
{ "platform": "rds-aurora" }]') { "platform": "rds-aurora" }]')
fi fi
echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
@@ -191,7 +190,7 @@ jobs:
if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "scale": "10" }, matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "scale": "10" },
{ "platform": "rds-aurora", "scale": "10" }]') { "platform": "rds-aurora", "scale": "10" }]')
fi fi
echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
@@ -254,9 +253,6 @@ jobs:
neon-captest-reuse) neon-captest-reuse)
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }} CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
;; ;;
neonvm-captest-sharding-reuse)
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_SHARDING_CONNSTR }}
;;
neon-captest-new | neon-captest-freetier | neonvm-captest-new | neonvm-captest-freetier) neon-captest-new | neon-captest-freetier | neonvm-captest-new | neonvm-captest-freetier)
CONNSTR=${{ steps.create-neon-project.outputs.dsn }} CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
;; ;;
@@ -274,15 +270,11 @@ jobs:
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
QUERIES=("SELECT version()") QUERY="SELECT version();"
if [[ "${PLATFORM}" = "neon"* ]]; then if [[ "${PLATFORM}" = "neon"* ]]; then
QUERIES+=("SHOW neon.tenant_id") QUERY="${QUERY} SHOW neon.tenant_id; SHOW neon.timeline_id;"
QUERIES+=("SHOW neon.timeline_id")
fi fi
psql ${CONNSTR} -c "${QUERY}"
for q in "${QUERIES[@]}"; do
psql ${CONNSTR} -c "${q}"
done
- name: Benchmark init - name: Benchmark init
uses: ./.github/actions/run-python-test-set uses: ./.github/actions/run-python-test-set
@@ -409,15 +401,11 @@ jobs:
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
QUERIES=("SELECT version()") QUERY="SELECT version();"
if [[ "${PLATFORM}" = "neon"* ]]; then if [[ "${PLATFORM}" = "neon"* ]]; then
QUERIES+=("SHOW neon.tenant_id") QUERY="${QUERY} SHOW neon.tenant_id; SHOW neon.timeline_id;"
QUERIES+=("SHOW neon.timeline_id")
fi fi
psql ${CONNSTR} -c "${QUERY}"
for q in "${QUERIES[@]}"; do
psql ${CONNSTR} -c "${q}"
done
- name: ClickBench benchmark - name: ClickBench benchmark
uses: ./.github/actions/run-python-test-set uses: ./.github/actions/run-python-test-set
@@ -519,15 +507,11 @@ jobs:
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
QUERIES=("SELECT version()") QUERY="SELECT version();"
if [[ "${PLATFORM}" = "neon"* ]]; then if [[ "${PLATFORM}" = "neon"* ]]; then
QUERIES+=("SHOW neon.tenant_id") QUERY="${QUERY} SHOW neon.tenant_id; SHOW neon.timeline_id;"
QUERIES+=("SHOW neon.timeline_id")
fi fi
psql ${CONNSTR} -c "${QUERY}"
for q in "${QUERIES[@]}"; do
psql ${CONNSTR} -c "${q}"
done
- name: Run TPC-H benchmark - name: Run TPC-H benchmark
uses: ./.github/actions/run-python-test-set uses: ./.github/actions/run-python-test-set
@@ -613,15 +597,11 @@ jobs:
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
QUERIES=("SELECT version()") QUERY="SELECT version();"
if [[ "${PLATFORM}" = "neon"* ]]; then if [[ "${PLATFORM}" = "neon"* ]]; then
QUERIES+=("SHOW neon.tenant_id") QUERY="${QUERY} SHOW neon.tenant_id; SHOW neon.timeline_id;"
QUERIES+=("SHOW neon.timeline_id")
fi fi
psql ${CONNSTR} -c "${QUERY}"
for q in "${QUERIES[@]}"; do
psql ${CONNSTR} -c "${q}"
done
- name: Run user examples - name: Run user examples
uses: ./.github/actions/run-python-test-set uses: ./.github/actions/run-python-test-set

View File

@@ -21,7 +21,6 @@ defaults:
concurrency: concurrency:
group: build-build-tools-image-${{ inputs.image-tag }} group: build-build-tools-image-${{ inputs.image-tag }}
cancel-in-progress: false
# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job. # No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
permissions: {} permissions: {}

View File

@@ -477,8 +477,6 @@ jobs:
BUILD_TAG: ${{ needs.tag.outputs.build-tag }} BUILD_TAG: ${{ needs.tag.outputs.build-tag }}
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
PAGESERVER_GET_VECTORED_IMPL: vectored PAGESERVER_GET_VECTORED_IMPL: vectored
PAGESERVER_GET_IMPL: vectored
PAGESERVER_VALIDATE_VEC_GET: true
# Temporary disable this step until we figure out why it's so flaky # Temporary disable this step until we figure out why it's so flaky
# Ref https://github.com/neondatabase/neon/issues/4540 # Ref https://github.com/neondatabase/neon/issues/4540
@@ -558,9 +556,6 @@ jobs:
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}" PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
TEST_RESULT_CONNSTR: "${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}" TEST_RESULT_CONNSTR: "${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}"
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
PAGESERVER_GET_VECTORED_IMPL: vectored
PAGESERVER_GET_IMPL: vectored
PAGESERVER_VALIDATE_VEC_GET: false
# XXX: no coverage data handling here, since benchmarks are run on release builds, # XXX: no coverage data handling here, since benchmarks are run on release builds,
# while coverage is currently collected for the debug ones # while coverage is currently collected for the debug ones
@@ -740,7 +735,7 @@ jobs:
run: | run: |
mkdir -p .docker-custom mkdir -p .docker-custom
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV
- uses: docker/setup-buildx-action@v2 - uses: docker/setup-buildx-action@v3
- uses: docker/login-action@v3 - uses: docker/login-action@v3
with: with:
@@ -797,7 +792,7 @@ jobs:
run: | run: |
mkdir -p .docker-custom mkdir -p .docker-custom
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV
- uses: docker/setup-buildx-action@v2 - uses: docker/setup-buildx-action@v3
with: with:
# Disable parallelism for docker buildkit. # Disable parallelism for docker buildkit.
# As we already build everything with `make -j$(nproc)`, running it in additional level of parallelisam blows up the Runner. # As we already build everything with `make -j$(nproc)`, running it in additional level of parallelisam blows up the Runner.
@@ -870,7 +865,7 @@ jobs:
run: run:
shell: sh -eu {0} shell: sh -eu {0}
env: env:
VM_BUILDER_VERSION: v0.28.1 VM_BUILDER_VERSION: v0.23.2
steps: steps:
- name: Checkout - name: Checkout
@@ -1132,15 +1127,15 @@ jobs:
-f deployProxy=false \ -f deployProxy=false \
-f deployStorage=true \ -f deployStorage=true \
-f deployStorageBroker=true \ -f deployStorageBroker=true \
-f deployStorageController=true \
-f branch=main \ -f branch=main \
-f dockerTag=${{needs.tag.outputs.build-tag}} \ -f dockerTag=${{needs.tag.outputs.build-tag}} \
-f deployPreprodRegion=true -f deployPreprodRegion=true
gh workflow --repo neondatabase/aws run deploy-prod.yml --ref main \ gh workflow --repo neondatabase/aws run deploy-prod.yml --ref main \
-f deployPgSniRouter=false \
-f deployProxy=false \
-f deployStorage=true \ -f deployStorage=true \
-f deployStorageBroker=true \ -f deployStorageBroker=true \
-f deployStorageController=true \
-f branch=main \ -f branch=main \
-f dockerTag=${{needs.tag.outputs.build-tag}} -f dockerTag=${{needs.tag.outputs.build-tag}}
elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
@@ -1149,7 +1144,6 @@ jobs:
-f deployProxy=true \ -f deployProxy=true \
-f deployStorage=false \ -f deployStorage=false \
-f deployStorageBroker=false \ -f deployStorageBroker=false \
-f deployStorageController=false \
-f branch=main \ -f branch=main \
-f dockerTag=${{needs.tag.outputs.build-tag}} \ -f dockerTag=${{needs.tag.outputs.build-tag}} \
-f deployPreprodRegion=true -f deployPreprodRegion=true

View File

@@ -28,9 +28,7 @@ jobs:
- name: Get build-tools image tag for the current commit - name: Get build-tools image tag for the current commit
id: get-build-tools-tag id: get-build-tools-tag
env: env:
# Usually, for COMMIT_SHA, we use `github.event.pull_request.head.sha || github.sha`, but here, even for PRs, COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
# we want to use `github.sha` i.e. point to a phantom merge commit to determine the image tag correctly.
COMMIT_SHA: ${{ github.sha }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: | run: |
LAST_BUILD_TOOLS_SHA=$( LAST_BUILD_TOOLS_SHA=$(

View File

@@ -20,7 +20,6 @@ defaults:
concurrency: concurrency:
group: pin-build-tools-image-${{ inputs.from-tag }} group: pin-build-tools-image-${{ inputs.from-tag }}
cancel-in-progress: false
permissions: {} permissions: {}

View File

@@ -62,14 +62,14 @@ jobs:
trigger-e2e-tests: trigger-e2e-tests:
needs: [ tag ] needs: [ tag ]
runs-on: ubuntu-latest runs-on: [ self-hosted, gen3, small ]
env: env:
TAG: ${{ needs.tag.outputs.build-tag }} TAG: ${{ needs.tag.outputs.build-tag }}
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
options: --init
steps: steps:
- name: check if ecr image are present - name: check if ecr image are present
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
run: | run: |
for REPO in neon compute-tools compute-node-v14 vm-compute-node-v14 compute-node-v15 vm-compute-node-v15 compute-node-v16 vm-compute-node-v16; do for REPO in neon compute-tools compute-node-v14 vm-compute-node-v14 compute-node-v15 vm-compute-node-v15 compute-node-v16 vm-compute-node-v16; do
OUTPUT=$(aws ecr describe-images --repository-name ${REPO} --region eu-central-1 --query "imageDetails[?imageTags[?contains(@, '${TAG}')]]" --output text) OUTPUT=$(aws ecr describe-images --repository-name ${REPO} --region eu-central-1 --query "imageDetails[?imageTags[?contains(@, '${TAG}')]]" --output text)
@@ -79,55 +79,41 @@ jobs:
fi fi
done done
- name: Set e2e-platforms
id: e2e-platforms
env:
PR_NUMBER: ${{ github.event.pull_request.number }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Default set of platforms to run e2e tests on
platforms='["docker", "k8s"]'
# If the PR changes vendor/, pgxn/ or libs/vm_monitor/ directories, or Dockerfile.compute-node, add k8s-neonvm to the list of platforms.
# If the workflow run is not a pull request, add k8s-neonvm to the list.
if [ "$GITHUB_EVENT_NAME" == "pull_request" ]; then
for f in $(gh api "/repos/${GITHUB_REPOSITORY}/pulls/${PR_NUMBER}/files" --paginate --jq '.[].filename'); do
case "$f" in
vendor/*|pgxn/*|libs/vm_monitor/*|Dockerfile.compute-node)
platforms=$(echo "${platforms}" | jq --compact-output '. += ["k8s-neonvm"] | unique')
;;
*)
# no-op
;;
esac
done
else
platforms=$(echo "${platforms}" | jq --compact-output '. += ["k8s-neonvm"] | unique')
fi
echo "e2e-platforms=${platforms}" | tee -a $GITHUB_OUTPUT
- name: Set PR's status to pending and request a remote CI test - name: Set PR's status to pending and request a remote CI test
env:
E2E_PLATFORMS: ${{ steps.e2e-platforms.outputs.e2e-platforms }}
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
run: | run: |
REMOTE_REPO="${GITHUB_REPOSITORY_OWNER}/cloud" # For pull requests, GH Actions set "github.sha" variable to point at a fake merge commit
# but we need to use a real sha of a latest commit in the PR's branch for the e2e job,
# to place a job run status update later.
COMMIT_SHA=${{ github.event.pull_request.head.sha }}
# For non-PR kinds of runs, the above will produce an empty variable, pick the original sha value for those
COMMIT_SHA=${COMMIT_SHA:-${{ github.sha }}}
gh api "/repos/${GITHUB_REPOSITORY}/statuses/${COMMIT_SHA}" \ REMOTE_REPO="${{ github.repository_owner }}/cloud"
--method POST \
--raw-field "state=pending" \
--raw-field "description=[$REMOTE_REPO] Remote CI job is about to start" \
--raw-field "context=neon-cloud-e2e"
gh workflow --repo ${REMOTE_REPO} \ curl -f -X POST \
run testing.yml \ https://api.github.com/repos/${{ github.repository }}/statuses/$COMMIT_SHA \
--ref "main" \ -H "Accept: application/vnd.github.v3+json" \
--raw-field "ci_job_name=neon-cloud-e2e" \ --user "${{ secrets.CI_ACCESS_TOKEN }}" \
--raw-field "commit_hash=$COMMIT_SHA" \ --data \
--raw-field "remote_repo=${GITHUB_REPOSITORY}" \ "{
--raw-field "storage_image_tag=${TAG}" \ \"state\": \"pending\",
--raw-field "compute_image_tag=${TAG}" \ \"context\": \"neon-cloud-e2e\",
--raw-field "concurrency_group=${E2E_CONCURRENCY_GROUP}" \ \"description\": \"[$REMOTE_REPO] Remote CI job is about to start\"
--raw-field "e2e-platforms=${E2E_PLATFORMS}" }"
curl -f -X POST \
https://api.github.com/repos/$REMOTE_REPO/actions/workflows/testing.yml/dispatches \
-H "Accept: application/vnd.github.v3+json" \
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
--data \
"{
\"ref\": \"main\",
\"inputs\": {
\"ci_job_name\": \"neon-cloud-e2e\",
\"commit_hash\": \"$COMMIT_SHA\",
\"remote_repo\": \"${{ github.repository }}\",
\"storage_image_tag\": \"${TAG}\",
\"compute_image_tag\": \"${TAG}\",
\"concurrency_group\": \"${{ env.E2E_CONCURRENCY_GROUP }}\"
}
}"

View File

@@ -1,5 +1,5 @@
/compute_tools/ @neondatabase/control-plane @neondatabase/compute /compute_tools/ @neondatabase/control-plane @neondatabase/compute
/storage_controller @neondatabase/storage /control_plane/attachment_service @neondatabase/storage
/libs/pageserver_api/ @neondatabase/storage /libs/pageserver_api/ @neondatabase/storage
/libs/postgres_ffi/ @neondatabase/compute @neondatabase/safekeepers /libs/postgres_ffi/ @neondatabase/compute @neondatabase/safekeepers
/libs/remote_storage/ @neondatabase/storage /libs/remote_storage/ @neondatabase/storage

903
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -3,7 +3,7 @@ resolver = "2"
members = [ members = [
"compute_tools", "compute_tools",
"control_plane", "control_plane",
"control_plane/storcon_cli", "control_plane/attachment_service",
"pageserver", "pageserver",
"pageserver/compaction", "pageserver/compaction",
"pageserver/ctl", "pageserver/ctl",
@@ -12,7 +12,6 @@ members = [
"proxy", "proxy",
"safekeeper", "safekeeper",
"storage_broker", "storage_broker",
"storage_controller",
"s3_scrubber", "s3_scrubber",
"workspace_hack", "workspace_hack",
"trace", "trace",
@@ -44,11 +43,10 @@ license = "Apache-2.0"
anyhow = { version = "1.0", features = ["backtrace"] } anyhow = { version = "1.0", features = ["backtrace"] }
arc-swap = "1.6" arc-swap = "1.6"
async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] } async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] }
atomic-take = "1.1.0" azure_core = "0.18"
azure_core = "0.19" azure_identity = "0.18"
azure_identity = "0.19" azure_storage = "0.18"
azure_storage = "0.19" azure_storage_blobs = "0.18"
azure_storage_blobs = "0.19"
flate2 = "1.0.26" flate2 = "1.0.26"
async-stream = "0.3" async-stream = "0.3"
async-trait = "0.1" async-trait = "0.1"
@@ -98,7 +96,7 @@ http-types = { version = "2", default-features = false }
humantime = "2.1" humantime = "2.1"
humantime-serde = "1.1.1" humantime-serde = "1.1.1"
hyper = "0.14" hyper = "0.14"
hyper-tungstenite = "0.13.0" hyper-tungstenite = "0.11"
inotify = "0.10.2" inotify = "0.10.2"
ipnet = "2.9.0" ipnet = "2.9.0"
itertools = "0.10" itertools = "0.10"
@@ -107,8 +105,7 @@ lasso = "0.7"
leaky-bucket = "1.0.1" leaky-bucket = "1.0.1"
libc = "0.2" libc = "0.2"
md5 = "0.7.0" md5 = "0.7.0"
measured = { version = "0.0.21", features=["lasso"] } measured = { version = "0.0.13", features=["default", "lasso"] }
measured-process = { version = "0.0.21" }
memoffset = "0.8" memoffset = "0.8"
native-tls = "0.2" native-tls = "0.2"
nix = { version = "0.27", features = ["fs", "process", "socket", "signal", "poll"] } nix = { version = "0.27", features = ["fs", "process", "socket", "signal", "poll"] }
@@ -130,10 +127,10 @@ prost = "0.11"
rand = "0.8" rand = "0.8"
redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] } redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] }
regex = "1.10.2" regex = "1.10.2"
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] } reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_20"] } reqwest-tracing = { version = "0.4.7", features = ["opentelemetry_0_20"] }
reqwest-middleware = "0.3.0" reqwest-middleware = "0.2.0"
reqwest-retry = "0.5" reqwest-retry = "0.2.2"
routerify = "3" routerify = "3"
rpds = "0.13" rpds = "0.13"
rustc-hash = "1.1.0" rustc-hash = "1.1.0"
@@ -143,7 +140,7 @@ rustls-split = "0.3"
scopeguard = "1.1" scopeguard = "1.1"
sysinfo = "0.29.2" sysinfo = "0.29.2"
sd-notify = "0.4.1" sd-notify = "0.4.1"
sentry = { version = "0.32", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] } sentry = { version = "0.31", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1" serde_json = "1"
serde_path_to_error = "0.1" serde_path_to_error = "0.1"
@@ -157,12 +154,11 @@ socket2 = "0.5"
strum = "0.24" strum = "0.24"
strum_macros = "0.24" strum_macros = "0.24"
"subtle" = "2.5.0" "subtle" = "2.5.0"
# https://github.com/nical/rust_debug/pull/4 svg_fmt = "0.4.1"
svg_fmt = { git = "https://github.com/neondatabase/fork--nical--rust_debug", branch = "neon" }
sync_wrapper = "0.1.2" sync_wrapper = "0.1.2"
tar = "0.4" tar = "0.4"
task-local-extensions = "0.1.4" task-local-extensions = "0.1.4"
test-context = "0.3" test-context = "0.1"
thiserror = "1.0" thiserror = "1.0"
tikv-jemallocator = "0.5" tikv-jemallocator = "0.5"
tikv-jemalloc-ctl = "0.5" tikv-jemalloc-ctl = "0.5"
@@ -177,11 +173,10 @@ tokio-util = { version = "0.7.10", features = ["io", "rt"] }
toml = "0.7" toml = "0.7"
toml_edit = "0.19" toml_edit = "0.19"
tonic = {version = "0.9", features = ["tls", "tls-roots"]} tonic = {version = "0.9", features = ["tls", "tls-roots"]}
tower-service = "0.3.2"
tracing = "0.1" tracing = "0.1"
tracing-error = "0.2.0" tracing-error = "0.2.0"
tracing-opentelemetry = "0.21.0" tracing-opentelemetry = "0.20.0"
tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json", "ansi"] } tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
twox-hash = { version = "1.6.3", default-features = false } twox-hash = { version = "1.6.3", default-features = false }
url = "2.2" url = "2.2"
urlencoding = "2.1" urlencoding = "2.1"

View File

@@ -58,12 +58,6 @@ RUN curl -fsSL "https://github.com/protocolbuffers/protobuf/releases/download/v$
&& mv protoc/include/google /usr/local/include/google \ && mv protoc/include/google /usr/local/include/google \
&& rm -rf protoc.zip protoc && rm -rf protoc.zip protoc
# s5cmd
ENV S5CMD_VERSION=2.2.2
RUN curl -sL "https://github.com/peak/s5cmd/releases/download/v${S5CMD_VERSION}/s5cmd_${S5CMD_VERSION}_Linux-$(uname -m | sed 's/x86_64/64bit/g' | sed 's/aarch64/arm64/g').tar.gz" | tar zxvf - s5cmd \
&& chmod +x s5cmd \
&& mv s5cmd /usr/local/bin/s5cmd
# LLVM # LLVM
ENV LLVM_VERSION=17 ENV LLVM_VERSION=17
RUN curl -fsSL 'https://apt.llvm.org/llvm-snapshot.gpg.key' | apt-key add - \ RUN curl -fsSL 'https://apt.llvm.org/llvm-snapshot.gpg.key' | apt-key add - \

View File

@@ -944,9 +944,6 @@ RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
# Create remote extension download directory
RUN mkdir /usr/local/download_extensions && chown -R postgres:postgres /usr/local/download_extensions
# Install: # Install:
# libreadline8 for psql # libreadline8 for psql
# libicu67, locales for collations (including ICU and plpgsql_check) # libicu67, locales for collations (including ICU and plpgsql_check)

View File

@@ -25,16 +25,14 @@ ifeq ($(UNAME_S),Linux)
# Seccomp BPF is only available for Linux # Seccomp BPF is only available for Linux
PG_CONFIGURE_OPTS += --with-libseccomp PG_CONFIGURE_OPTS += --with-libseccomp
else ifeq ($(UNAME_S),Darwin) else ifeq ($(UNAME_S),Darwin)
ifndef DISABLE_HOMEBREW # macOS with brew-installed openssl requires explicit paths
# macOS with brew-installed openssl requires explicit paths # It can be configured with OPENSSL_PREFIX variable
# It can be configured with OPENSSL_PREFIX variable OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3)
OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3) PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib
PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib PG_CONFIGURE_OPTS += PKG_CONFIG_PATH=$(shell brew --prefix icu4c)/lib/pkgconfig
PG_CONFIGURE_OPTS += PKG_CONFIG_PATH=$(shell brew --prefix icu4c)/lib/pkgconfig # macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure
# macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure # brew formulae are keg-only and not symlinked into HOMEBREW_PREFIX, force their usage
# brew formulae are keg-only and not symlinked into HOMEBREW_PREFIX, force their usage EXTRA_PATH_OVERRIDES += $(shell brew --prefix bison)/bin/:$(shell brew --prefix flex)/bin/:
EXTRA_PATH_OVERRIDES += $(shell brew --prefix bison)/bin/:$(shell brew --prefix flex)/bin/:
endif
endif endif
# Use -C option so that when PostgreSQL "make install" installs the # Use -C option so that when PostgreSQL "make install" installs the

View File

@@ -818,15 +818,9 @@ impl ComputeNode {
Client::connect(zenith_admin_connstr.as_str(), NoTls) Client::connect(zenith_admin_connstr.as_str(), NoTls)
.context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?; .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
// Disable forwarding so that users don't get a cloud_admin role // Disable forwarding so that users don't get a cloud_admin role
client.simple_query("SET neon.forward_ddl = false")?;
let mut func = || { client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
client.simple_query("SET neon.forward_ddl = false")?; client.simple_query("GRANT zenith_admin TO cloud_admin")?;
client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
client.simple_query("GRANT zenith_admin TO cloud_admin")?;
Ok::<_, anyhow::Error>(())
};
func().context("apply_config setup cloud_admin")?;
drop(client); drop(client);
// reconnect with connstring with expected name // reconnect with connstring with expected name
@@ -838,29 +832,24 @@ impl ComputeNode {
}; };
// Disable DDL forwarding because control plane already knows about these roles/databases. // Disable DDL forwarding because control plane already knows about these roles/databases.
client client.simple_query("SET neon.forward_ddl = false")?;
.simple_query("SET neon.forward_ddl = false")
.context("apply_config SET neon.forward_ddl = false")?;
// Proceed with post-startup configuration. Note, that order of operations is important. // Proceed with post-startup configuration. Note, that order of operations is important.
let spec = &compute_state.pspec.as_ref().expect("spec must be set").spec; let spec = &compute_state.pspec.as_ref().expect("spec must be set").spec;
create_neon_superuser(spec, &mut client).context("apply_config create_neon_superuser")?; create_neon_superuser(spec, &mut client)?;
cleanup_instance(&mut client).context("apply_config cleanup_instance")?; cleanup_instance(&mut client)?;
handle_roles(spec, &mut client).context("apply_config handle_roles")?; handle_roles(spec, &mut client)?;
handle_databases(spec, &mut client).context("apply_config handle_databases")?; handle_databases(spec, &mut client)?;
handle_role_deletions(spec, connstr.as_str(), &mut client) handle_role_deletions(spec, connstr.as_str(), &mut client)?;
.context("apply_config handle_role_deletions")?;
handle_grants( handle_grants(
spec, spec,
&mut client, &mut client,
connstr.as_str(), connstr.as_str(),
self.has_feature(ComputeFeature::AnonExtension), self.has_feature(ComputeFeature::AnonExtension),
) )?;
.context("apply_config handle_grants")?; handle_extensions(spec, &mut client)?;
handle_extensions(spec, &mut client).context("apply_config handle_extensions")?; handle_extension_neon(&mut client)?;
handle_extension_neon(&mut client).context("apply_config handle_extension_neon")?; create_availability_check_data(&mut client)?;
create_availability_check_data(&mut client)
.context("apply_config create_availability_check_data")?;
// 'Close' connection // 'Close' connection
drop(client); drop(client);
@@ -868,7 +857,7 @@ impl ComputeNode {
// Run migrations separately to not hold up cold starts // Run migrations separately to not hold up cold starts
thread::spawn(move || { thread::spawn(move || {
let mut client = Client::connect(connstr.as_str(), NoTls)?; let mut client = Client::connect(connstr.as_str(), NoTls)?;
handle_migrations(&mut client).context("apply_config handle_migrations") handle_migrations(&mut client)
}); });
Ok(()) Ok(())
} }
@@ -1273,12 +1262,10 @@ LIMIT 100",
.await .await
.map_err(DownloadError::Other); .map_err(DownloadError::Other);
if download_size.is_ok() { self.ext_download_progress
self.ext_download_progress .write()
.write() .expect("bad lock")
.expect("bad lock") .insert(ext_archive_name.to_string(), (download_start, true));
.insert(ext_archive_name.to_string(), (download_start, true));
}
download_size download_size
} }

View File

@@ -6,8 +6,8 @@ use std::path::Path;
use anyhow::Result; use anyhow::Result;
use crate::pg_helpers::escape_conf_value; use crate::pg_helpers::escape_conf_value;
use crate::pg_helpers::{GenericOptionExt, PgOptionsSerialize}; use crate::pg_helpers::PgOptionsSerialize;
use compute_api::spec::{ComputeMode, ComputeSpec, GenericOption}; use compute_api::spec::{ComputeMode, ComputeSpec};
/// Check that `line` is inside a text file and put it there if it is not. /// Check that `line` is inside a text file and put it there if it is not.
/// Create file if it doesn't exist. /// Create file if it doesn't exist.
@@ -92,27 +92,6 @@ pub fn write_postgres_conf(
} }
} }
if cfg!(target_os = "linux") {
// Check /proc/sys/vm/overcommit_memory -- if it equals 2 (i.e. linux memory overcommit is
// disabled), then the control plane has enabled swap and we should set
// dynamic_shared_memory_type = 'mmap'.
//
// This is (maybe?) temporary - for more, see https://github.com/neondatabase/cloud/issues/12047.
let overcommit_memory_contents = std::fs::read_to_string("/proc/sys/vm/overcommit_memory")
// ignore any errors - they may be expected to occur under certain situations (e.g. when
// not running in Linux).
.unwrap_or_else(|_| String::new());
if overcommit_memory_contents.trim() == "2" {
let opt = GenericOption {
name: "dynamic_shared_memory_type".to_owned(),
value: Some("mmap".to_owned()),
vartype: "enum".to_owned(),
};
write!(file, "{}", opt.to_pg_setting())?;
}
}
// If there are any extra options in the 'settings' field, append those // If there are any extra options in the 'settings' field, append those
if spec.cluster.settings.is_some() { if spec.cluster.settings.is_some() {
writeln!(file, "# Managed by compute_ctl: begin")?; writeln!(file, "# Managed by compute_ctl: begin")?;

View File

@@ -44,7 +44,7 @@ pub fn escape_conf_value(s: &str) -> String {
format!("'{}'", res) format!("'{}'", res)
} }
pub trait GenericOptionExt { trait GenericOptionExt {
fn to_pg_option(&self) -> String; fn to_pg_option(&self) -> String;
fn to_pg_setting(&self) -> String; fn to_pg_setting(&self) -> String;
} }

View File

@@ -2,7 +2,7 @@ use std::fs::File;
use std::path::Path; use std::path::Path;
use std::str::FromStr; use std::str::FromStr;
use anyhow::{anyhow, bail, Context, Result}; use anyhow::{anyhow, bail, Result};
use postgres::config::Config; use postgres::config::Config;
use postgres::{Client, NoTls}; use postgres::{Client, NoTls};
use reqwest::StatusCode; use reqwest::StatusCode;
@@ -302,9 +302,9 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
RoleAction::Create => { RoleAction::Create => {
// This branch only runs when roles are created through the console, so it is // This branch only runs when roles are created through the console, so it is
// safe to add more permissions here. BYPASSRLS and REPLICATION are inherited // safe to add more permissions here. BYPASSRLS and REPLICATION are inherited
// from neon_superuser. // from neon_superuser. (NOTE: REPLICATION has been removed from here for now).
let mut query: String = format!( let mut query: String = format!(
"CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS REPLICATION IN ROLE neon_superuser", "CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS IN ROLE neon_superuser",
name.pg_quote() name.pg_quote()
); );
info!("running role create query: '{}'", &query); info!("running role create query: '{}'", &query);
@@ -698,8 +698,7 @@ pub fn handle_grants(
// it is important to run this after all grants // it is important to run this after all grants
if enable_anon_extension { if enable_anon_extension {
handle_extension_anon(spec, &db.owner, &mut db_client, false) handle_extension_anon(spec, &db.owner, &mut db_client, false)?;
.context("handle_grants handle_extension_anon")?;
} }
} }
@@ -744,24 +743,21 @@ pub fn handle_extension_neon(client: &mut Client) -> Result<()> {
// which may happen in two cases: // which may happen in two cases:
// - extension was just installed // - extension was just installed
// - extension was already installed and is up to date // - extension was already installed and is up to date
let query = "ALTER EXTENSION neon UPDATE"; // DISABLED due to compute node unpinning epic
info!("update neon extension version with query: {}", query); // let query = "ALTER EXTENSION neon UPDATE";
if let Err(e) = client.simple_query(query) { // info!("update neon extension version with query: {}", query);
error!( // client.simple_query(query)?;
"failed to upgrade neon extension during `handle_extension_neon`: {}",
e
);
}
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn handle_neon_extension_upgrade(client: &mut Client) -> Result<()> { pub fn handle_neon_extension_upgrade(_client: &mut Client) -> Result<()> {
info!("handle neon extension upgrade"); info!("handle neon extension upgrade (not really)");
let query = "ALTER EXTENSION neon UPDATE"; // DISABLED due to compute node unpinning epic
info!("update neon extension version with query: {}", query); // let query = "ALTER EXTENSION neon UPDATE";
client.simple_query(query)?; // info!("update neon extension version with query: {}", query);
// client.simple_query(query)?;
Ok(()) Ok(())
} }
@@ -810,40 +806,43 @@ $$;"#,
"", "",
"", "",
"", "",
"",
// Add new migrations below. // Add new migrations below.
r#"
DO $$
DECLARE
role_name TEXT;
BEGIN
FOR role_name IN SELECT rolname FROM pg_roles WHERE rolreplication IS TRUE
LOOP
RAISE NOTICE 'EXECUTING ALTER ROLE % NOREPLICATION', quote_ident(role_name);
EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' NOREPLICATION';
END LOOP;
END
$$;"#,
]; ];
let mut func = || { let mut query = "CREATE SCHEMA IF NOT EXISTS neon_migration";
let query = "CREATE SCHEMA IF NOT EXISTS neon_migration"; client.simple_query(query)?;
client.simple_query(query)?;
let query = "CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)"; query = "CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)";
client.simple_query(query)?; client.simple_query(query)?;
let query = "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING"; query = "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING";
client.simple_query(query)?; client.simple_query(query)?;
let query = "ALTER SCHEMA neon_migration OWNER TO cloud_admin"; query = "ALTER SCHEMA neon_migration OWNER TO cloud_admin";
client.simple_query(query)?; client.simple_query(query)?;
let query = "REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC"; query = "REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC";
client.simple_query(query)?; client.simple_query(query)?;
Ok::<_, anyhow::Error>(())
};
func().context("handle_migrations prepare")?;
let query = "SELECT id FROM neon_migration.migration_id"; query = "SELECT id FROM neon_migration.migration_id";
let row = client let row = client.query_one(query, &[])?;
.query_one(query, &[])
.context("handle_migrations get migration_id")?;
let mut current_migration: usize = row.get::<&str, i64>("id") as usize; let mut current_migration: usize = row.get::<&str, i64>("id") as usize;
let starting_migration_id = current_migration; let starting_migration_id = current_migration;
let query = "BEGIN"; query = "BEGIN";
client client.simple_query(query)?;
.simple_query(query)
.context("handle_migrations begin")?;
while current_migration < migrations.len() { while current_migration < migrations.len() {
let migration = &migrations[current_migration]; let migration = &migrations[current_migration];
@@ -851,9 +850,7 @@ $$;"#,
info!("Skip migration id={}", current_migration); info!("Skip migration id={}", current_migration);
} else { } else {
info!("Running migration:\n{}\n", migration); info!("Running migration:\n{}\n", migration);
client.simple_query(migration).with_context(|| { client.simple_query(migration)?;
format!("handle_migrations current_migration={}", current_migration)
})?;
} }
current_migration += 1; current_migration += 1;
} }
@@ -861,14 +858,10 @@ $$;"#,
"UPDATE neon_migration.migration_id SET id={}", "UPDATE neon_migration.migration_id SET id={}",
migrations.len() migrations.len()
); );
client client.simple_query(&setval)?;
.simple_query(&setval)
.context("handle_migrations update id")?;
let query = "COMMIT"; query = "COMMIT";
client client.simple_query(query)?;
.simple_query(query)
.context("handle_migrations commit")?;
info!( info!(
"Ran {} migrations", "Ran {} migrations",

View File

@@ -17,7 +17,6 @@ nix.workspace = true
once_cell.workspace = true once_cell.workspace = true
postgres.workspace = true postgres.workspace = true
hex.workspace = true hex.workspace = true
humantime-serde.workspace = true
hyper.workspace = true hyper.workspace = true
regex.workspace = true regex.workspace = true
reqwest = { workspace = true, features = ["blocking", "json"] } reqwest = { workspace = true, features = ["blocking", "json"] }

View File

@@ -1,5 +1,5 @@
[package] [package]
name = "storage_controller" name = "attachment_service"
version = "0.1.0" version = "0.1.0"
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
@@ -25,13 +25,12 @@ git-version.workspace = true
hex.workspace = true hex.workspace = true
hyper.workspace = true hyper.workspace = true
humantime.workspace = true humantime.workspace = true
itertools.workspace = true
lasso.workspace = true lasso.workspace = true
once_cell.workspace = true once_cell.workspace = true
pageserver_api.workspace = true pageserver_api.workspace = true
pageserver_client.workspace = true pageserver_client.workspace = true
postgres_connection.workspace = true postgres_connection.workspace = true
reqwest = { workspace = true, features = ["stream"] } reqwest.workspace = true
routerify.workspace = true routerify.workspace = true
serde.workspace = true serde.workspace = true
serde_json.workspace = true serde_json.workspace = true
@@ -45,8 +44,8 @@ diesel = { version = "2.1.4", features = ["serde_json", "postgres", "r2d2"] }
diesel_migrations = { version = "2.1.0" } diesel_migrations = { version = "2.1.0" }
r2d2 = { version = "0.8.10" } r2d2 = { version = "0.8.10" }
utils = { path = "../libs/utils/" } utils = { path = "../../libs/utils/" }
metrics = { path = "../libs/metrics/" } metrics = { path = "../../libs/metrics/" }
control_plane = { path = "../control_plane" } control_plane = { path = ".." }
workspace_hack = { version = "0.1", path = "../workspace_hack" } workspace_hack = { version = "0.1", path = "../../workspace_hack" }

View File

@@ -0,0 +1,462 @@
use std::{collections::HashMap, time::Duration};
use control_plane::endpoint::{ComputeControlPlane, EndpointStatus};
use control_plane::local_env::LocalEnv;
use hyper::{Method, StatusCode};
use pageserver_api::shard::{ShardCount, ShardNumber, ShardStripeSize, TenantShardId};
use postgres_connection::parse_host_port;
use serde::{Deserialize, Serialize};
use tokio_util::sync::CancellationToken;
use utils::{
backoff::{self},
id::{NodeId, TenantId},
};
use crate::service::Config;
// Retry delay when the control plane answered 423 Locked (tenant busy): a fixed
// pause is used instead of the fast exponential backoff in `backoff::retry`
// (see the LOCKED arm in `do_notify_iteration`).
const BUSY_DELAY: Duration = Duration::from_secs(1);
// Longer fixed delay after an explicit 429 Too Many Requests response
// (see the TOO_MANY_REQUESTS arm in `do_notify_iteration`).
const SLOWDOWN_DELAY: Duration = Duration::from_secs(5);

// NOTE(review): not referenced within this file — presumably a bound on
// concurrent compute-notification API calls used by callers; confirm at call sites.
pub(crate) const API_CONCURRENCY: usize = 32;
/// Aggregated state for a sharded tenant: which pageserver each known shard is
/// attached to, plus the layout parameters (`stripe_size`, `shard_count`) that
/// must stay consistent across all shards of one generation of the tenant.
struct ShardedComputeHookTenant {
    stripe_size: ShardStripeSize,
    shard_count: ShardCount,
    // (shard number, attached pageserver); kept sorted by shard number (see `update`).
    shards: Vec<(ShardNumber, NodeId)>,
}

/// Per-tenant state tracked by [`ComputeHook`].  Unsharded tenants only need the
/// single pageserver they are attached to; sharded tenants accumulate one entry
/// per shard before a notification can be emitted.
enum ComputeHookTenant {
    Unsharded(NodeId),
    Sharded(ShardedComputeHookTenant),
}
impl ComputeHookTenant {
    /// Build initial state from the first shard we hear about.  A shard count of
    /// zero or one yields the unsharded representation.
    fn new(tenant_shard_id: TenantShardId, stripe_size: ShardStripeSize, node_id: NodeId) -> Self {
        if tenant_shard_id.shard_count.count() <= 1 {
            Self::Unsharded(node_id)
        } else {
            Self::Sharded(ShardedComputeHookTenant {
                shards: vec![(tenant_shard_id.shard_number, node_id)],
                stripe_size,
                shard_count: tenant_shard_id.shard_count,
            })
        }
    }

    /// Record one shard's location.  If the incoming update disagrees with our
    /// stored stripe size or shard count (e.g. mid-split), all existing state is
    /// discarded and rebuilt from this single shard.
    fn update(
        &mut self,
        tenant_shard_id: TenantShardId,
        stripe_size: ShardStripeSize,
        node_id: NodeId,
    ) {
        // First decide whether the update fits the representation we already hold.
        let compatible = match self {
            Self::Unsharded(_) => tenant_shard_id.shard_count.count() == 1,
            Self::Sharded(sharded) => {
                sharded.stripe_size == stripe_size
                    && sharded.shard_count == tenant_shard_id.shard_count
            }
        };

        if !compatible {
            // Layout changed: start over from this shard alone.
            *self = Self::new(tenant_shard_id, stripe_size, node_id);
            return;
        }

        match self {
            Self::Unsharded(existing_node_id) => *existing_node_id = node_id,
            Self::Sharded(sharded) => {
                let found = sharded
                    .shards
                    .iter()
                    .position(|s| s.0 == tenant_shard_id.shard_number);
                match found {
                    Some(idx) => sharded.shards[idx].1 = node_id,
                    None => {
                        sharded.shards.push((tenant_shard_id.shard_number, node_id));
                        // Keep the vector ordered by shard number.
                        sharded.shards.sort_by_key(|s| s.0);
                    }
                }
            }
        }
    }
}
/// One shard's entry in the notification payload: pageserver `node_id` is
/// serving shard `shard_number`.
#[derive(Serialize, Deserialize, Debug)]
struct ComputeHookNotifyRequestShard {
    node_id: NodeId,
    shard_number: ShardNumber,
}

/// Request body that we send to the control plane to notify it of where a tenant is attached
#[derive(Serialize, Deserialize, Debug)]
struct ComputeHookNotifyRequest {
    tenant_id: TenantId,
    // `None` for unsharded tenants, `Some` once a full sharded layout is known
    // (see `ComputeHookTenant::maybe_reconfigure`).
    stripe_size: Option<ShardStripeSize>,
    shards: Vec<ComputeHookNotifyRequestShard>,
}
/// Error type for attempts to call into the control plane compute notification hook
///
/// NOTE: `Fatal` and `Unexpected` are treated as non-retryable by `do_notify`;
/// the other transient variants are retried with backoff.
#[derive(thiserror::Error, Debug)]
pub(crate) enum NotifyError {
    // Request was not sent successfully, e.g. transport error
    #[error("Sending request: {0}")]
    Request(#[from] reqwest::Error),
    // Request could not be serviced right now due to ongoing Operation in control plane, but should be possible soon.
    #[error("Control plane tenant busy")]
    Busy,
    // Explicit 429 response asking us to retry less frequently
    #[error("Control plane overloaded")]
    SlowDown,
    // A 503 response indicates the control plane can't handle the request right now
    #[error("Control plane unavailable (status {0})")]
    Unavailable(StatusCode),
    // API returned unexpected non-success status.  We will retry, but log a warning.
    #[error("Control plane returned unexpected status {0}")]
    Unexpected(StatusCode),
    // We shutdown while sending
    #[error("Shutting down")]
    ShuttingDown,
    // A response indicates we will never succeed, such as 400 or 404
    #[error("Non-retryable error {0}")]
    Fatal(StatusCode),
}
impl ComputeHookTenant {
    /// Produce the notification payload for this tenant, if we currently have a
    /// complete picture.  Unsharded tenants are always ready; sharded tenants
    /// return `None` until every shard's pageserver is known.
    fn maybe_reconfigure(&self, tenant_id: TenantId) -> Option<ComputeHookNotifyRequest> {
        match self {
            Self::Unsharded(node_id) => {
                let only_shard = ComputeHookNotifyRequestShard {
                    shard_number: ShardNumber(0),
                    node_id: *node_id,
                };
                Some(ComputeHookNotifyRequest {
                    tenant_id,
                    shards: vec![only_shard],
                    stripe_size: None,
                })
            }
            Self::Sharded(sharded_tenant) => {
                let expected = sharded_tenant.shard_count.count() as usize;
                if sharded_tenant.shards.len() != expected {
                    // Sharded tenant doesn't yet have information for all its shards
                    tracing::info!(
                        "ComputeHookTenant::maybe_reconfigure: not enough shards ({}/{})",
                        sharded_tenant.shards.len(),
                        sharded_tenant.shard_count.count()
                    );
                    return None;
                }

                let shards = sharded_tenant
                    .shards
                    .iter()
                    .map(|&(shard_number, node_id)| ComputeHookNotifyRequestShard {
                        shard_number,
                        node_id,
                    })
                    .collect();
                Some(ComputeHookNotifyRequest {
                    tenant_id,
                    shards,
                    stripe_size: Some(sharded_tenant.stripe_size),
                })
            }
        }
    }
}
/// The compute hook is a destination for notifications about changes to tenant:pageserver
/// mapping. It aggregates updates for the shards in a tenant, and when appropriate reconfigures
/// the compute connection string.
pub(super) struct ComputeHook {
    config: Config,
    // Per-tenant aggregation state; async mutex because notifications are sent
    // while the map entry's derived request is being computed under the lock.
    state: tokio::sync::Mutex<HashMap<TenantId, ComputeHookTenant>>,
    // Pre-formatted "Bearer <jwt>" value, if a control-plane token is configured.
    authorization_header: Option<String>,
}
impl ComputeHook {
    /// Build a hook from service config; captures the control-plane JWT (if any)
    /// as a ready-to-send Authorization header value.
    pub(super) fn new(config: Config) -> Self {
        let authorization_header = config
            .control_plane_jwt_token
            .clone()
            .map(|jwt| format!("Bearer {}", jwt));

        Self {
            state: Default::default(),
            config,
            authorization_header,
        }
    }

    /// For test environments: use neon_local's LocalEnv to update compute
    ///
    /// Instead of calling a control-plane URL, reconfigure every running
    /// endpoint of the tenant directly with the new pageserver addresses.
    /// Missing neon_local config is tolerated (warn + Ok), but an unknown
    /// pageserver or unparseable address panics via `expect` — acceptable
    /// because this path only runs in test environments.
    async fn do_notify_local(
        &self,
        reconfigure_request: ComputeHookNotifyRequest,
    ) -> anyhow::Result<()> {
        let env = match LocalEnv::load_config() {
            Ok(e) => e,
            Err(e) => {
                tracing::warn!("Couldn't load neon_local config, skipping compute update ({e})");
                return Ok(());
            }
        };
        let cplane =
            ComputeControlPlane::load(env.clone()).expect("Error loading compute control plane");
        let ComputeHookNotifyRequest {
            tenant_id,
            shards,
            stripe_size,
        } = reconfigure_request;

        // Resolve each shard's pageserver node id to a (host, port) pair for postgres.
        let compute_pageservers = shards
            .into_iter()
            .map(|shard| {
                let ps_conf = env
                    .get_pageserver_conf(shard.node_id)
                    .expect("Unknown pageserver");
                let (pg_host, pg_port) = parse_host_port(&ps_conf.listen_pg_addr)
                    .expect("Unable to parse listen_pg_addr");
                (pg_host, pg_port.unwrap_or(5432))
            })
            .collect::<Vec<_>>();

        // Only running endpoints of this tenant are reconfigured.
        for (endpoint_name, endpoint) in &cplane.endpoints {
            if endpoint.tenant_id == tenant_id && endpoint.status() == EndpointStatus::Running {
                tracing::info!("Reconfiguring endpoint {}", endpoint_name,);
                endpoint
                    .reconfigure(compute_pageservers.clone(), stripe_size)
                    .await?;
            }
        }

        Ok(())
    }

    /// One attempt to PUT the notification to the control plane, mapping the
    /// HTTP status to a `NotifyError` that drives the retry policy in `do_notify`.
    async fn do_notify_iteration(
        &self,
        client: &reqwest::Client,
        url: &String,
        reconfigure_request: &ComputeHookNotifyRequest,
        cancel: &CancellationToken,
    ) -> Result<(), NotifyError> {
        let req = client.request(Method::PUT, url);
        let req = if let Some(value) = &self.authorization_header {
            req.header(reqwest::header::AUTHORIZATION, value)
        } else {
            req
        };

        tracing::info!(
            "Sending notify request to {} ({:?})",
            url,
            reconfigure_request
        );
        let send_result = req.json(&reconfigure_request).send().await;
        let response = match send_result {
            Ok(r) => r,
            Err(e) => return Err(e.into()),
        };

        // Treat all 2xx responses as success
        if response.status() >= StatusCode::OK && response.status() < StatusCode::MULTIPLE_CHOICES {
            if response.status() != StatusCode::OK {
                // Non-200 2xx response: it doesn't make sense to retry, but this is unexpected, so
                // log a warning.
                tracing::warn!(
                    "Unexpected 2xx response code {} from control plane",
                    response.status()
                );
            }

            return Ok(());
        }

        // Error response codes
        match response.status() {
            StatusCode::TOO_MANY_REQUESTS => {
                // TODO: 429 handling should be global: set some state visible to other requests
                // so that they will delay before starting, rather than all notifications trying
                // once before backing off.
                // The timeout races the delay against cancellation; the Result is
                // deliberately discarded either way.
                tokio::time::timeout(SLOWDOWN_DELAY, cancel.cancelled())
                    .await
                    .ok();
                Err(NotifyError::SlowDown)
            }
            StatusCode::LOCKED => {
                // Delay our retry if busy: the usual fast exponential backoff in backoff::retry
                // is not appropriate
                tokio::time::timeout(BUSY_DELAY, cancel.cancelled())
                    .await
                    .ok();
                Err(NotifyError::Busy)
            }
            StatusCode::SERVICE_UNAVAILABLE
            | StatusCode::GATEWAY_TIMEOUT
            | StatusCode::BAD_GATEWAY => Err(NotifyError::Unavailable(response.status())),
            StatusCode::BAD_REQUEST | StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => {
                Err(NotifyError::Fatal(response.status()))
            }
            _ => Err(NotifyError::Unexpected(response.status())),
        }
    }

    /// Send the notification with a bounded retry loop: `Fatal` and `Unexpected`
    /// errors short-circuit; transient errors are retried up to the limits given
    /// to `backoff::retry`.  A `None` from retry means we were cancelled.
    async fn do_notify(
        &self,
        url: &String,
        reconfigure_request: ComputeHookNotifyRequest,
        cancel: &CancellationToken,
    ) -> Result<(), NotifyError> {
        let client = reqwest::Client::new();
        backoff::retry(
            || self.do_notify_iteration(&client, url, &reconfigure_request, cancel),
            |e| matches!(e, NotifyError::Fatal(_) | NotifyError::Unexpected(_)),
            3,
            10,
            "Send compute notification",
            cancel,
        )
        .await
        .ok_or_else(|| NotifyError::ShuttingDown)
        .and_then(|x| x)
    }

    /// Call this to notify the compute (postgres) tier of new pageservers to use
    /// for a tenant. notify() is called by each shard individually, and this function
    /// will decide whether an update to the tenant is sent. An update is sent on the
    /// condition that:
    /// - We know a pageserver for every shard.
    /// - All the shards have the same shard_count (i.e. we are not mid-split)
    ///
    /// Cancellation token enables callers to drop out, e.g. if calling from a Reconciler
    /// that is cancelled.
    ///
    /// This function is fallible, including in the case that the control plane is transiently
    /// unavailable. A limited number of retries are done internally to efficiently hide short unavailability
    /// periods, but we don't retry forever. The **caller** is responsible for handling failures and
    /// ensuring that they eventually call again to ensure that the compute is eventually notified of
    /// the proper pageserver nodes for a tenant.
    #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), node_id))]
    pub(super) async fn notify(
        &self,
        tenant_shard_id: TenantShardId,
        node_id: NodeId,
        stripe_size: ShardStripeSize,
        cancel: &CancellationToken,
    ) -> Result<(), NotifyError> {
        // NOTE: the mutex is held for the whole send, serializing notifications.
        let mut locked = self.state.lock().await;

        // Fold this shard's update into the tenant's aggregated state.
        use std::collections::hash_map::Entry;
        let tenant = match locked.entry(tenant_shard_id.tenant_id) {
            Entry::Vacant(e) => e.insert(ComputeHookTenant::new(
                tenant_shard_id,
                stripe_size,
                node_id,
            )),
            Entry::Occupied(e) => {
                let tenant = e.into_mut();
                tenant.update(tenant_shard_id, stripe_size, node_id);
                tenant
            }
        };

        let reconfigure_request = tenant.maybe_reconfigure(tenant_shard_id.tenant_id);
        let Some(reconfigure_request) = reconfigure_request else {
            // The tenant doesn't yet have pageservers for all its shards: we won't notify anything
            // until it does.
            tracing::info!("Tenant isn't yet ready to emit a notification");
            return Ok(());
        };

        if let Some(notify_url) = &self.config.compute_hook_url {
            self.do_notify(notify_url, reconfigure_request, cancel)
                .await
        } else {
            self.do_notify_local(reconfigure_request)
                .await
                .map_err(|e| {
                    // This path is for testing only, so munge the error into our prod-style error type.
                    tracing::error!("Local notification hook failed: {e}");
                    NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR)
                })
        }
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use pageserver_api::shard::{ShardCount, ShardNumber};
    use utils::id::TenantId;

    use super::*;

    /// Exercises `ComputeHookTenant` through the unsharded -> sharded
    /// transition: when a notification may be emitted, and what it contains.
    #[test]
    fn tenant_updates() -> anyhow::Result<()> {
        let tenant_id = TenantId::generate();
        let mut tenant_state = ComputeHookTenant::new(
            TenantShardId {
                tenant_id,
                shard_count: ShardCount::new(0),
                shard_number: ShardNumber(0),
            },
            ShardStripeSize(12345),
            NodeId(1),
        );

        // An unsharded tenant is always ready to emit a notification
        assert!(tenant_state.maybe_reconfigure(tenant_id).is_some());
        assert_eq!(
            tenant_state
                .maybe_reconfigure(tenant_id)
                .unwrap()
                .shards
                .len(),
            1
        );
        // Unsharded notifications carry no stripe size.
        assert!(tenant_state
            .maybe_reconfigure(tenant_id)
            .unwrap()
            .stripe_size
            .is_none());

        // Writing the first shard of a multi-sharded situation (i.e. in a split)
        // resets the tenant state and puts it in an non-notifying state (need to
        // see all shards)
        tenant_state.update(
            TenantShardId {
                tenant_id,
                shard_count: ShardCount::new(2),
                shard_number: ShardNumber(1),
            },
            ShardStripeSize(32768),
            NodeId(1),
        );
        assert!(tenant_state.maybe_reconfigure(tenant_id).is_none());

        // Writing the second shard makes it ready to notify
        tenant_state.update(
            TenantShardId {
                tenant_id,
                shard_count: ShardCount::new(2),
                shard_number: ShardNumber(0),
            },
            ShardStripeSize(32768),
            NodeId(1),
        );

        assert!(tenant_state.maybe_reconfigure(tenant_id).is_some());
        assert_eq!(
            tenant_state
                .maybe_reconfigure(tenant_id)
                .unwrap()
                .shards
                .len(),
            2
        );
        // A complete sharded layout reports its stripe size.
        assert_eq!(
            tenant_state
                .maybe_reconfigure(tenant_id)
                .unwrap()
                .stripe_size,
            Some(ShardStripeSize(32768))
        );

        Ok(())
    }
}

View File

@@ -184,19 +184,6 @@ impl HeartbeaterTask {
} }
} }
} }
tracing::info!(
"Heartbeat round complete for {} nodes, {} offline",
new_state.len(),
new_state
.values()
.filter(|s| match s {
PageserverState::Available { .. } => {
false
}
PageserverState::Offline => true,
})
.count()
);
let mut deltas = Vec::new(); let mut deltas = Vec::new();
let now = Instant::now(); let now = Instant::now();

View File

@@ -4,12 +4,10 @@ use crate::metrics::{
}; };
use crate::reconciler::ReconcileError; use crate::reconciler::ReconcileError;
use crate::service::{Service, STARTUP_RECONCILE_TIMEOUT}; use crate::service::{Service, STARTUP_RECONCILE_TIMEOUT};
use anyhow::Context;
use futures::Future; use futures::Future;
use hyper::header::CONTENT_TYPE; use hyper::header::CONTENT_TYPE;
use hyper::{Body, Request, Response}; use hyper::{Body, Request, Response};
use hyper::{StatusCode, Uri}; use hyper::{StatusCode, Uri};
use metrics::{BuildInfo, NeonMetrics};
use pageserver_api::models::{ use pageserver_api::models::{
TenantConfigRequest, TenantCreateRequest, TenantLocationConfigRequest, TenantShardSplitRequest, TenantConfigRequest, TenantCreateRequest, TenantLocationConfigRequest, TenantShardSplitRequest,
TenantTimeTravelRequest, TimelineCreateRequest, TenantTimeTravelRequest, TimelineCreateRequest,
@@ -36,8 +34,7 @@ use utils::{
}; };
use pageserver_api::controller_api::{ use pageserver_api::controller_api::{
NodeAvailability, NodeConfigureRequest, NodeRegisterRequest, TenantPolicyRequest, NodeAvailability, NodeConfigureRequest, NodeRegisterRequest, TenantShardMigrateRequest,
TenantShardMigrateRequest,
}; };
use pageserver_api::upcall_api::{ReAttachRequest, ValidateRequest}; use pageserver_api::upcall_api::{ReAttachRequest, ValidateRequest};
@@ -46,19 +43,15 @@ use control_plane::storage_controller::{AttachHookRequest, InspectRequest};
use routerify::Middleware; use routerify::Middleware;
/// State available to HTTP request handlers /// State available to HTTP request handlers
#[derive(Clone)]
pub struct HttpState { pub struct HttpState {
service: Arc<crate::service::Service>, service: Arc<crate::service::Service>,
auth: Option<Arc<SwappableJwtAuth>>, auth: Option<Arc<SwappableJwtAuth>>,
neon_metrics: NeonMetrics,
allowlist_routes: Vec<Uri>, allowlist_routes: Vec<Uri>,
} }
impl HttpState { impl HttpState {
pub fn new( pub fn new(service: Arc<crate::service::Service>, auth: Option<Arc<SwappableJwtAuth>>) -> Self {
service: Arc<crate::service::Service>,
auth: Option<Arc<SwappableJwtAuth>>,
build_info: BuildInfo,
) -> Self {
let allowlist_routes = ["/status", "/ready", "/metrics"] let allowlist_routes = ["/status", "/ready", "/metrics"]
.iter() .iter()
.map(|v| v.parse().unwrap()) .map(|v| v.parse().unwrap())
@@ -66,7 +59,6 @@ impl HttpState {
Self { Self {
service, service,
auth, auth,
neon_metrics: NeonMetrics::new(build_info),
allowlist_routes, allowlist_routes,
} }
} }
@@ -259,12 +251,6 @@ async fn handle_tenant_time_travel_remote_storage(
json_response(StatusCode::OK, ()) json_response(StatusCode::OK, ())
} }
fn map_reqwest_hyper_status(status: reqwest::StatusCode) -> Result<hyper::StatusCode, ApiError> {
hyper::StatusCode::from_u16(status.as_u16())
.context("invalid status code")
.map_err(ApiError::InternalServerError)
}
async fn handle_tenant_secondary_download( async fn handle_tenant_secondary_download(
service: Arc<Service>, service: Arc<Service>,
req: Request<Body>, req: Request<Body>,
@@ -273,7 +259,7 @@ async fn handle_tenant_secondary_download(
let wait = parse_query_param(&req, "wait_ms")?.map(Duration::from_millis); let wait = parse_query_param(&req, "wait_ms")?.map(Duration::from_millis);
let (status, progress) = service.tenant_secondary_download(tenant_id, wait).await?; let (status, progress) = service.tenant_secondary_download(tenant_id, wait).await?;
json_response(map_reqwest_hyper_status(status)?, progress) json_response(status, progress)
} }
async fn handle_tenant_delete( async fn handle_tenant_delete(
@@ -284,10 +270,7 @@ async fn handle_tenant_delete(
check_permissions(&req, Scope::PageServerApi)?; check_permissions(&req, Scope::PageServerApi)?;
deletion_wrapper(service, move |service| async move { deletion_wrapper(service, move |service| async move {
service service.tenant_delete(tenant_id).await
.tenant_delete(tenant_id)
.await
.and_then(map_reqwest_hyper_status)
}) })
.await .await
} }
@@ -318,10 +301,7 @@ async fn handle_tenant_timeline_delete(
let timeline_id: TimelineId = parse_request_param(&req, "timeline_id")?; let timeline_id: TimelineId = parse_request_param(&req, "timeline_id")?;
deletion_wrapper(service, move |service| async move { deletion_wrapper(service, move |service| async move {
service service.tenant_timeline_delete(tenant_id, timeline_id).await
.tenant_timeline_delete(tenant_id, timeline_id)
.await
.and_then(map_reqwest_hyper_status)
}) })
.await .await
} }
@@ -384,9 +364,11 @@ async fn handle_tenant_timeline_passthrough(
} }
// We have a reqest::Response, would like a http::Response // We have a reqest::Response, would like a http::Response
let mut builder = hyper::Response::builder().status(map_reqwest_hyper_status(resp.status())?); let mut builder = hyper::Response::builder()
.status(resp.status())
.version(resp.version());
for (k, v) in resp.headers() { for (k, v) in resp.headers() {
builder = builder.header(k.as_str(), v.as_bytes()); builder = builder.header(k, v);
} }
let response = builder let response = builder
@@ -416,15 +398,6 @@ async fn handle_tenant_describe(
json_response(StatusCode::OK, service.tenant_describe(tenant_id)?) json_response(StatusCode::OK, service.tenant_describe(tenant_id)?)
} }
async fn handle_tenant_list(
service: Arc<Service>,
req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
json_response(StatusCode::OK, service.tenant_list())
}
async fn handle_node_register(mut req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_node_register(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?; check_permissions(&req, Scope::Admin)?;
@@ -438,10 +411,7 @@ async fn handle_node_list(req: Request<Body>) -> Result<Response<Body>, ApiError
check_permissions(&req, Scope::Admin)?; check_permissions(&req, Scope::Admin)?;
let state = get_state(&req); let state = get_state(&req);
let nodes = state.service.node_list().await?; json_response(StatusCode::OK, state.service.node_list().await?)
let api_nodes = nodes.into_iter().map(|n| n.describe()).collect::<Vec<_>>();
json_response(StatusCode::OK, api_nodes)
} }
async fn handle_node_drop(req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_node_drop(req: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ -508,22 +478,6 @@ async fn handle_tenant_shard_migrate(
) )
} }
async fn handle_tenant_update_policy(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
let update_req = json_request::<TenantPolicyRequest>(&mut req).await?;
let state = get_state(&req);
json_response(
StatusCode::OK,
state
.service
.tenant_update_policy(tenant_id, update_req)
.await?,
)
}
async fn handle_tenant_drop(req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_tenant_drop(req: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?; let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
check_permissions(&req, Scope::PageServerApi)?; check_permissions(&req, Scope::PageServerApi)?;
@@ -533,18 +487,6 @@ async fn handle_tenant_drop(req: Request<Body>) -> Result<Response<Body>, ApiErr
json_response(StatusCode::OK, state.service.tenant_drop(tenant_id).await?) json_response(StatusCode::OK, state.service.tenant_drop(tenant_id).await?)
} }
async fn handle_tenant_import(req: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
check_permissions(&req, Scope::PageServerApi)?;
let state = get_state(&req);
json_response(
StatusCode::OK,
state.service.tenant_import(tenant_id).await?,
)
}
async fn handle_tenants_dump(req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_tenants_dump(req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?; check_permissions(&req, Scope::Admin)?;
@@ -567,14 +509,6 @@ async fn handle_consistency_check(req: Request<Body>) -> Result<Response<Body>,
json_response(StatusCode::OK, state.service.consistency_check().await?) json_response(StatusCode::OK, state.service.consistency_check().await?)
} }
async fn handle_reconcile_all(req: Request<Body>) -> Result<Response<Body>, ApiError> {
check_permissions(&req, Scope::Admin)?;
let state = get_state(&req);
json_response(StatusCode::OK, state.service.reconcile_all_now().await?)
}
/// Status endpoint is just used for checking that our HTTP listener is up /// Status endpoint is just used for checking that our HTTP listener is up
async fn handle_status(_req: Request<Body>) -> Result<Response<Body>, ApiError> { async fn handle_status(_req: Request<Body>) -> Result<Response<Body>, ApiError> {
json_response(StatusCode::OK, ()) json_response(StatusCode::OK, ())
@@ -631,17 +565,9 @@ where
.await .await
} }
/// Check if the required scope is held in the request's token, or if the request has
/// a token with 'admin' scope then always permit it.
fn check_permissions(request: &Request<Body>, required_scope: Scope) -> Result<(), ApiError> { fn check_permissions(request: &Request<Body>, required_scope: Scope) -> Result<(), ApiError> {
check_permission_with(request, |claims| { check_permission_with(request, |claims| {
match crate::auth::check_permission(claims, required_scope) { crate::auth::check_permission(claims, required_scope)
Err(e) => match crate::auth::check_permission(claims, Scope::Admin) {
Ok(()) => Ok(()),
Err(_) => Err(e),
},
Ok(()) => Ok(()),
}
}) })
} }
@@ -701,11 +627,10 @@ fn epilogue_metrics_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>
}) })
} }
pub async fn measured_metrics_handler(req: Request<Body>) -> Result<Response<Body>, ApiError> { pub async fn measured_metrics_handler(_req: Request<Body>) -> Result<Response<Body>, ApiError> {
pub const TEXT_FORMAT: &str = "text/plain; version=0.0.4"; pub const TEXT_FORMAT: &str = "text/plain; version=0.0.4";
let state = get_state(&req); let payload = crate::metrics::METRICS_REGISTRY.encode();
let payload = crate::metrics::METRICS_REGISTRY.encode(&state.neon_metrics);
let response = Response::builder() let response = Response::builder()
.status(200) .status(200)
.header(CONTENT_TYPE, TEXT_FORMAT) .header(CONTENT_TYPE, TEXT_FORMAT)
@@ -734,7 +659,6 @@ where
pub fn make_router( pub fn make_router(
service: Arc<Service>, service: Arc<Service>,
auth: Option<Arc<SwappableJwtAuth>>, auth: Option<Arc<SwappableJwtAuth>>,
build_info: BuildInfo,
) -> RouterBuilder<hyper::Body, ApiError> { ) -> RouterBuilder<hyper::Body, ApiError> {
let mut router = endpoint::make_router() let mut router = endpoint::make_router()
.middleware(prologue_metrics_middleware()) .middleware(prologue_metrics_middleware())
@@ -751,7 +675,7 @@ pub fn make_router(
} }
router router
.data(Arc::new(HttpState::new(service, auth, build_info))) .data(Arc::new(HttpState::new(service, auth)))
.get("/metrics", |r| { .get("/metrics", |r| {
named_request_span(r, measured_metrics_handler, RequestName("metrics")) named_request_span(r, measured_metrics_handler, RequestName("metrics"))
}) })
@@ -782,13 +706,6 @@ pub fn make_router(
.post("/debug/v1/node/:node_id/drop", |r| { .post("/debug/v1/node/:node_id/drop", |r| {
named_request_span(r, handle_node_drop, RequestName("debug_v1_node_drop")) named_request_span(r, handle_node_drop, RequestName("debug_v1_node_drop"))
}) })
.post("/debug/v1/tenant/:tenant_id/import", |r| {
named_request_span(
r,
handle_tenant_import,
RequestName("debug_v1_tenant_import"),
)
})
.get("/debug/v1/tenant", |r| { .get("/debug/v1/tenant", |r| {
named_request_span(r, handle_tenants_dump, RequestName("debug_v1_tenant")) named_request_span(r, handle_tenants_dump, RequestName("debug_v1_tenant"))
}) })
@@ -809,9 +726,6 @@ pub fn make_router(
RequestName("debug_v1_consistency_check"), RequestName("debug_v1_consistency_check"),
) )
}) })
.post("/debug/v1/reconcile_all", |r| {
request_span(r, handle_reconcile_all)
})
.put("/debug/v1/failpoints", |r| { .put("/debug/v1/failpoints", |r| {
request_span(r, |r| failpoints_handler(r, CancellationToken::new())) request_span(r, |r| failpoints_handler(r, CancellationToken::new()))
}) })
@@ -851,16 +765,6 @@ pub fn make_router(
RequestName("control_v1_tenant_describe"), RequestName("control_v1_tenant_describe"),
) )
}) })
.get("/control/v1/tenant", |r| {
tenant_service_handler(r, handle_tenant_list, RequestName("control_v1_tenant_list"))
})
.put("/control/v1/tenant/:tenant_id/policy", |r| {
named_request_span(
r,
handle_tenant_update_policy,
RequestName("control_v1_tenant_policy"),
)
})
// Tenant operations // Tenant operations
// The ^/v1/ endpoints act as a "Virtual Pageserver", enabling shard-naive clients to call into // The ^/v1/ endpoints act as a "Virtual Pageserver", enabling shard-naive clients to call into
// this service to manage tenants that actually consist of many tenant shards, as if they are a single entity. // this service to manage tenants that actually consist of many tenant shards, as if they are a single entity.

View File

@@ -14,7 +14,7 @@ mod reconciler;
mod scheduler; mod scheduler;
mod schema; mod schema;
pub mod service; pub mod service;
mod tenant_shard; mod tenant_state;
#[derive(Ord, PartialOrd, Eq, PartialEq, Copy, Clone, Serialize)] #[derive(Ord, PartialOrd, Eq, PartialEq, Copy, Clone, Serialize)]
struct Sequence(u64); struct Sequence(u64);

View File

@@ -1,22 +1,18 @@
use anyhow::{anyhow, Context}; use anyhow::{anyhow, Context};
use attachment_service::http::make_router;
use attachment_service::metrics::preinitialize_metrics;
use attachment_service::persistence::Persistence;
use attachment_service::service::{Config, Service, MAX_UNAVAILABLE_INTERVAL_DEFAULT};
use camino::Utf8PathBuf; use camino::Utf8PathBuf;
use clap::Parser; use clap::Parser;
use diesel::Connection; use diesel::Connection;
use metrics::launch_timestamp::LaunchTimestamp; use metrics::launch_timestamp::LaunchTimestamp;
use metrics::BuildInfo;
use std::sync::Arc; use std::sync::Arc;
use storage_controller::http::make_router;
use storage_controller::metrics::preinitialize_metrics;
use storage_controller::persistence::Persistence;
use storage_controller::service::{
Config, Service, MAX_UNAVAILABLE_INTERVAL_DEFAULT, RECONCILER_CONCURRENCY_DEFAULT,
};
use tokio::signal::unix::SignalKind; use tokio::signal::unix::SignalKind;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
use utils::auth::{JwtAuth, SwappableJwtAuth}; use utils::auth::{JwtAuth, SwappableJwtAuth};
use utils::logging::{self, LogFormat}; use utils::logging::{self, LogFormat};
use utils::sentry_init::init_sentry;
use utils::{project_build_tag, project_git_version, tcp_listener}; use utils::{project_build_tag, project_git_version, tcp_listener};
project_git_version!(GIT_VERSION); project_git_version!(GIT_VERSION);
@@ -54,7 +50,7 @@ struct Cli {
#[arg(short, long)] #[arg(short, long)]
path: Option<Utf8PathBuf>, path: Option<Utf8PathBuf>,
/// URL to connect to postgres, like postgresql://localhost:1234/storage_controller /// URL to connect to postgres, like postgresql://localhost:1234/attachment_service
#[arg(long)] #[arg(long)]
database_url: Option<String>, database_url: Option<String>,
@@ -65,14 +61,6 @@ struct Cli {
/// Grace period before marking unresponsive pageserver offline /// Grace period before marking unresponsive pageserver offline
#[arg(long)] #[arg(long)]
max_unavailable_interval: Option<humantime::Duration>, max_unavailable_interval: Option<humantime::Duration>,
/// Maximum number of reconcilers that may run in parallel
#[arg(long)]
reconciler_concurrency: Option<usize>,
/// How long to wait for the initial database connection to be available.
#[arg(long, default_value = "5s")]
db_connect_timeout: humantime::Duration,
} }
enum StrictMode { enum StrictMode {
@@ -170,8 +158,6 @@ fn main() -> anyhow::Result<()> {
std::process::exit(1); std::process::exit(1);
})); }));
let _sentry_guard = init_sentry(Some(GIT_VERSION.into()), &[]);
tokio::runtime::Builder::new_current_thread() tokio::runtime::Builder::new_current_thread()
// We use spawn_blocking for database operations, so require approximately // We use spawn_blocking for database operations, so require approximately
// as many blocking threads as we will open database connections. // as many blocking threads as we will open database connections.
@@ -203,11 +189,6 @@ async fn async_main() -> anyhow::Result<()> {
args.listen args.listen
); );
let build_info = BuildInfo {
revision: GIT_VERSION,
build_tag: BUILD_TAG,
};
let strict_mode = if args.dev { let strict_mode = if args.dev {
StrictMode::Dev StrictMode::Dev
} else { } else {
@@ -252,14 +233,9 @@ async fn async_main() -> anyhow::Result<()> {
.max_unavailable_interval .max_unavailable_interval
.map(humantime::Duration::into) .map(humantime::Duration::into)
.unwrap_or(MAX_UNAVAILABLE_INTERVAL_DEFAULT), .unwrap_or(MAX_UNAVAILABLE_INTERVAL_DEFAULT),
reconciler_concurrency: args
.reconciler_concurrency
.unwrap_or(RECONCILER_CONCURRENCY_DEFAULT),
}; };
// After loading secrets & config, but before starting anything else, apply database migrations // After loading secrets & config, but before starting anything else, apply database migrations
Persistence::await_connection(&secrets.database_url, args.db_connect_timeout.into()).await?;
migration_run(&secrets.database_url) migration_run(&secrets.database_url)
.await .await
.context("Running database migrations")?; .context("Running database migrations")?;
@@ -274,7 +250,7 @@ async fn async_main() -> anyhow::Result<()> {
let auth = secrets let auth = secrets
.public_key .public_key
.map(|jwt_auth| Arc::new(SwappableJwtAuth::new(jwt_auth))); .map(|jwt_auth| Arc::new(SwappableJwtAuth::new(jwt_auth)));
let router = make_router(service.clone(), auth, build_info) let router = make_router(service.clone(), auth)
.build() .build()
.map_err(|err| anyhow!(err))?; .map_err(|err| anyhow!(err))?;
let router_service = utils::http::RouterService::new(router).unwrap(); let router_service = utils::http::RouterService::new(router).unwrap();

View File

@@ -8,8 +8,10 @@
//! The rest of the code defines label group types and deals with converting outer types to labels. //! The rest of the code defines label group types and deals with converting outer types to labels.
//! //!
use bytes::Bytes; use bytes::Bytes;
use measured::{label::LabelValue, metric::histogram, FixedCardinalityLabel, MetricGroup}; use measured::{
use metrics::NeonMetrics; label::{LabelValue, StaticLabelSet},
FixedCardinalityLabel, MetricGroup,
};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use std::sync::Mutex; use std::sync::Mutex;
@@ -24,28 +26,21 @@ pub fn preinitialize_metrics() {
pub(crate) struct StorageControllerMetrics { pub(crate) struct StorageControllerMetrics {
pub(crate) metrics_group: StorageControllerMetricGroup, pub(crate) metrics_group: StorageControllerMetricGroup,
encoder: Mutex<measured::text::BufferedTextEncoder>, encoder: Mutex<measured::text::TextEncoder>,
} }
#[derive(measured::MetricGroup)] #[derive(measured::MetricGroup)]
#[metric(new())]
pub(crate) struct StorageControllerMetricGroup { pub(crate) struct StorageControllerMetricGroup {
/// Count of how many times we spawn a reconcile task /// Count of how many times we spawn a reconcile task
pub(crate) storage_controller_reconcile_spawn: measured::Counter, pub(crate) storage_controller_reconcile_spawn: measured::Counter,
/// Reconciler tasks completed, broken down by success/failure/cancelled /// Reconciler tasks completed, broken down by success/failure/cancelled
pub(crate) storage_controller_reconcile_complete: pub(crate) storage_controller_reconcile_complete:
measured::CounterVec<ReconcileCompleteLabelGroupSet>, measured::CounterVec<ReconcileCompleteLabelGroupSet>,
/// Count of how many times we make an optimization change to a tenant's scheduling
pub(crate) storage_controller_schedule_optimization: measured::Counter,
/// HTTP request status counters for handled requests /// HTTP request status counters for handled requests
pub(crate) storage_controller_http_request_status: pub(crate) storage_controller_http_request_status:
measured::CounterVec<HttpRequestStatusLabelGroupSet>, measured::CounterVec<HttpRequestStatusLabelGroupSet>,
/// HTTP request handler latency across all status codes /// HTTP request handler latency across all status codes
#[metric(metadata = histogram::Thresholds::exponential_buckets(0.1, 2.0))]
pub(crate) storage_controller_http_request_latency: pub(crate) storage_controller_http_request_latency:
measured::HistogramVec<HttpRequestLatencyLabelGroupSet, 5>, measured::HistogramVec<HttpRequestLatencyLabelGroupSet, 5>,
@@ -57,7 +52,6 @@ pub(crate) struct StorageControllerMetricGroup {
/// Latency of HTTP requests to the pageserver, broken down by pageserver /// Latency of HTTP requests to the pageserver, broken down by pageserver
/// node id, request name and method. This include both successful and unsuccessful /// node id, request name and method. This include both successful and unsuccessful
/// requests. /// requests.
#[metric(metadata = histogram::Thresholds::exponential_buckets(0.1, 2.0))]
pub(crate) storage_controller_pageserver_request_latency: pub(crate) storage_controller_pageserver_request_latency:
measured::HistogramVec<PageserverRequestLabelGroupSet, 5>, measured::HistogramVec<PageserverRequestLabelGroupSet, 5>,
@@ -69,7 +63,6 @@ pub(crate) struct StorageControllerMetricGroup {
/// Latency of pass-through HTTP requests to the pageserver, broken down by pageserver /// Latency of pass-through HTTP requests to the pageserver, broken down by pageserver
/// node id, request name and method. This include both successful and unsuccessful /// node id, request name and method. This include both successful and unsuccessful
/// requests. /// requests.
#[metric(metadata = histogram::Thresholds::exponential_buckets(0.1, 2.0))]
pub(crate) storage_controller_passthrough_request_latency: pub(crate) storage_controller_passthrough_request_latency:
measured::HistogramVec<PageserverRequestLabelGroupSet, 5>, measured::HistogramVec<PageserverRequestLabelGroupSet, 5>,
@@ -78,34 +71,75 @@ pub(crate) struct StorageControllerMetricGroup {
measured::CounterVec<DatabaseQueryErrorLabelGroupSet>, measured::CounterVec<DatabaseQueryErrorLabelGroupSet>,
/// Latency of database queries, broken down by operation. /// Latency of database queries, broken down by operation.
#[metric(metadata = histogram::Thresholds::exponential_buckets(0.1, 2.0))]
pub(crate) storage_controller_database_query_latency: pub(crate) storage_controller_database_query_latency:
measured::HistogramVec<DatabaseQueryLatencyLabelGroupSet, 5>, measured::HistogramVec<DatabaseQueryLatencyLabelGroupSet, 5>,
} }
impl StorageControllerMetrics { impl StorageControllerMetrics {
pub(crate) fn encode(&self, neon_metrics: &NeonMetrics) -> Bytes { pub(crate) fn encode(&self) -> Bytes {
let mut encoder = self.encoder.lock().unwrap(); let mut encoder = self.encoder.lock().unwrap();
neon_metrics self.metrics_group.collect_into(&mut *encoder);
.collect_group_into(&mut *encoder)
.unwrap_or_else(|infallible| match infallible {});
self.metrics_group
.collect_group_into(&mut *encoder)
.unwrap_or_else(|infallible| match infallible {});
encoder.finish() encoder.finish()
} }
} }
impl Default for StorageControllerMetrics { impl Default for StorageControllerMetrics {
fn default() -> Self { fn default() -> Self {
let mut metrics_group = StorageControllerMetricGroup::new();
metrics_group
.storage_controller_reconcile_complete
.init_all_dense();
Self { Self {
metrics_group, metrics_group: StorageControllerMetricGroup::new(),
encoder: Mutex::new(measured::text::BufferedTextEncoder::new()), encoder: Mutex::new(measured::text::TextEncoder::new()),
}
}
}
impl StorageControllerMetricGroup {
pub(crate) fn new() -> Self {
Self {
storage_controller_reconcile_spawn: measured::Counter::new(),
storage_controller_reconcile_complete: measured::CounterVec::new(
ReconcileCompleteLabelGroupSet {
status: StaticLabelSet::new(),
},
),
storage_controller_http_request_status: measured::CounterVec::new(
HttpRequestStatusLabelGroupSet {
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
status: StaticLabelSet::new(),
},
),
storage_controller_http_request_latency: measured::HistogramVec::new(
measured::metric::histogram::Thresholds::exponential_buckets(0.1, 2.0),
),
storage_controller_pageserver_request_error: measured::CounterVec::new(
PageserverRequestLabelGroupSet {
pageserver_id: lasso::ThreadedRodeo::new(),
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
},
),
storage_controller_pageserver_request_latency: measured::HistogramVec::new(
measured::metric::histogram::Thresholds::exponential_buckets(0.1, 2.0),
),
storage_controller_passthrough_request_error: measured::CounterVec::new(
PageserverRequestLabelGroupSet {
pageserver_id: lasso::ThreadedRodeo::new(),
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
},
),
storage_controller_passthrough_request_latency: measured::HistogramVec::new(
measured::metric::histogram::Thresholds::exponential_buckets(0.1, 2.0),
),
storage_controller_database_query_error: measured::CounterVec::new(
DatabaseQueryErrorLabelGroupSet {
operation: StaticLabelSet::new(),
error_type: StaticLabelSet::new(),
},
),
storage_controller_database_query_latency: measured::HistogramVec::new(
measured::metric::histogram::Thresholds::exponential_buckets(0.1, 2.0),
),
} }
} }
} }
@@ -119,7 +153,7 @@ pub(crate) struct ReconcileCompleteLabelGroup {
#[derive(measured::LabelGroup)] #[derive(measured::LabelGroup)]
#[label(set = HttpRequestStatusLabelGroupSet)] #[label(set = HttpRequestStatusLabelGroupSet)]
pub(crate) struct HttpRequestStatusLabelGroup<'a> { pub(crate) struct HttpRequestStatusLabelGroup<'a> {
#[label(dynamic_with = lasso::ThreadedRodeo, default)] #[label(dynamic_with = lasso::ThreadedRodeo)]
pub(crate) path: &'a str, pub(crate) path: &'a str,
pub(crate) method: Method, pub(crate) method: Method,
pub(crate) status: StatusCode, pub(crate) status: StatusCode,
@@ -128,21 +162,40 @@ pub(crate) struct HttpRequestStatusLabelGroup<'a> {
#[derive(measured::LabelGroup)] #[derive(measured::LabelGroup)]
#[label(set = HttpRequestLatencyLabelGroupSet)] #[label(set = HttpRequestLatencyLabelGroupSet)]
pub(crate) struct HttpRequestLatencyLabelGroup<'a> { pub(crate) struct HttpRequestLatencyLabelGroup<'a> {
#[label(dynamic_with = lasso::ThreadedRodeo, default)] #[label(dynamic_with = lasso::ThreadedRodeo)]
pub(crate) path: &'a str, pub(crate) path: &'a str,
pub(crate) method: Method, pub(crate) method: Method,
} }
impl Default for HttpRequestLatencyLabelGroupSet {
fn default() -> Self {
Self {
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
}
}
}
#[derive(measured::LabelGroup, Clone)] #[derive(measured::LabelGroup, Clone)]
#[label(set = PageserverRequestLabelGroupSet)] #[label(set = PageserverRequestLabelGroupSet)]
pub(crate) struct PageserverRequestLabelGroup<'a> { pub(crate) struct PageserverRequestLabelGroup<'a> {
#[label(dynamic_with = lasso::ThreadedRodeo, default)] #[label(dynamic_with = lasso::ThreadedRodeo)]
pub(crate) pageserver_id: &'a str, pub(crate) pageserver_id: &'a str,
#[label(dynamic_with = lasso::ThreadedRodeo, default)] #[label(dynamic_with = lasso::ThreadedRodeo)]
pub(crate) path: &'a str, pub(crate) path: &'a str,
pub(crate) method: Method, pub(crate) method: Method,
} }
impl Default for PageserverRequestLabelGroupSet {
fn default() -> Self {
Self {
pageserver_id: lasso::ThreadedRodeo::new(),
path: lasso::ThreadedRodeo::new(),
method: StaticLabelSet::new(),
}
}
}
#[derive(measured::LabelGroup)] #[derive(measured::LabelGroup)]
#[label(set = DatabaseQueryErrorLabelGroupSet)] #[label(set = DatabaseQueryErrorLabelGroupSet)]
pub(crate) struct DatabaseQueryErrorLabelGroup { pub(crate) struct DatabaseQueryErrorLabelGroup {
@@ -156,7 +209,7 @@ pub(crate) struct DatabaseQueryLatencyLabelGroup {
pub(crate) operation: DatabaseOperation, pub(crate) operation: DatabaseOperation,
} }
#[derive(FixedCardinalityLabel, Clone, Copy)] #[derive(FixedCardinalityLabel)]
pub(crate) enum ReconcileOutcome { pub(crate) enum ReconcileOutcome {
#[label(rename = "ok")] #[label(rename = "ok")]
Success, Success,
@@ -164,7 +217,7 @@ pub(crate) enum ReconcileOutcome {
Cancel, Cancel,
} }
#[derive(FixedCardinalityLabel, Copy, Clone)] #[derive(FixedCardinalityLabel, Clone)]
pub(crate) enum Method { pub(crate) enum Method {
Get, Get,
Put, Put,
@@ -189,12 +242,11 @@ impl From<hyper::Method> for Method {
} }
} }
#[derive(Clone, Copy)]
pub(crate) struct StatusCode(pub(crate) hyper::http::StatusCode); pub(crate) struct StatusCode(pub(crate) hyper::http::StatusCode);
impl LabelValue for StatusCode { impl LabelValue for StatusCode {
fn visit<V: measured::label::LabelVisitor>(&self, v: V) -> V::Output { fn visit<V: measured::label::LabelVisitor>(&self, v: V) -> V::Output {
v.write_int(self.0.as_u16() as i64) v.write_int(self.0.as_u16() as u64)
} }
} }
@@ -212,7 +264,7 @@ impl FixedCardinalityLabel for StatusCode {
} }
} }
#[derive(FixedCardinalityLabel, Clone, Copy)] #[derive(FixedCardinalityLabel)]
pub(crate) enum DatabaseErrorLabel { pub(crate) enum DatabaseErrorLabel {
Query, Query,
Connection, Connection,

View File

@@ -1,14 +1,13 @@
use std::{str::FromStr, time::Duration}; use std::{str::FromStr, time::Duration};
use hyper::StatusCode;
use pageserver_api::{ use pageserver_api::{
controller_api::{ controller_api::{
NodeAvailability, NodeDescribeResponse, NodeRegisterRequest, NodeSchedulingPolicy, NodeAvailability, NodeRegisterRequest, NodeSchedulingPolicy, TenantLocateResponseShard,
TenantLocateResponseShard,
}, },
shard::TenantShardId, shard::TenantShardId,
}; };
use pageserver_client::mgmt_api; use pageserver_client::mgmt_api;
use reqwest::StatusCode;
use serde::Serialize; use serde::Serialize;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
use utils::{backoff, id::NodeId}; use utils::{backoff, id::NodeId};
@@ -257,19 +256,6 @@ impl Node {
) )
.await .await
} }
/// Generate the simplified API-friendly description of a node's state
pub(crate) fn describe(&self) -> NodeDescribeResponse {
NodeDescribeResponse {
id: self.id,
availability: self.availability.into(),
scheduling: self.scheduling,
listen_http_addr: self.listen_http_addr.clone(),
listen_http_port: self.listen_http_port,
listen_pg_addr: self.listen_pg_addr.clone(),
listen_pg_port: self.listen_pg_port,
}
}
} }
impl std::fmt::Display for Node { impl std::fmt::Display for Node {

View File

@@ -1,14 +1,13 @@
use pageserver_api::{ use pageserver_api::{
models::{ models::{
LocationConfig, LocationConfigListResponse, PageserverUtilization, SecondaryProgress, LocationConfig, LocationConfigListResponse, PageserverUtilization, SecondaryProgress,
TenantScanRemoteStorageResponse, TenantShardSplitRequest, TenantShardSplitResponse, TenantShardSplitRequest, TenantShardSplitResponse, TimelineCreateRequest, TimelineInfo,
TimelineCreateRequest, TimelineInfo,
}, },
shard::TenantShardId, shard::TenantShardId,
}; };
use pageserver_client::mgmt_api::{Client, Result}; use pageserver_client::mgmt_api::{Client, Result};
use reqwest::StatusCode; use reqwest::StatusCode;
use utils::id::{NodeId, TenantId, TimelineId}; use utils::id::{NodeId, TimelineId};
/// Thin wrapper around [`pageserver_client::mgmt_api::Client`]. It allows the storage /// Thin wrapper around [`pageserver_client::mgmt_api::Client`]. It allows the storage
/// controller to collect metrics in a non-intrusive manner. /// controller to collect metrics in a non-intrusive manner.
@@ -89,18 +88,6 @@ impl PageserverClient {
) )
} }
pub(crate) async fn tenant_scan_remote_storage(
&self,
tenant_id: TenantId,
) -> Result<TenantScanRemoteStorageResponse> {
measured_request!(
"tenant_scan_remote_storage",
crate::metrics::Method::Get,
&self.node_id_label,
self.inner.tenant_scan_remote_storage(tenant_id).await
)
}
pub(crate) async fn tenant_secondary_download( pub(crate) async fn tenant_secondary_download(
&self, &self,
tenant_id: TenantShardId, tenant_id: TenantShardId,

View File

@@ -2,7 +2,6 @@ pub(crate) mod split_state;
use std::collections::HashMap; use std::collections::HashMap;
use std::str::FromStr; use std::str::FromStr;
use std::time::Duration; use std::time::Duration;
use std::time::Instant;
use self::split_state::SplitState; use self::split_state::SplitState;
use camino::Utf8Path; use camino::Utf8Path;
@@ -10,7 +9,6 @@ use camino::Utf8PathBuf;
use diesel::pg::PgConnection; use diesel::pg::PgConnection;
use diesel::prelude::*; use diesel::prelude::*;
use diesel::Connection; use diesel::Connection;
use pageserver_api::controller_api::ShardSchedulingPolicy;
use pageserver_api::controller_api::{NodeSchedulingPolicy, PlacementPolicy}; use pageserver_api::controller_api::{NodeSchedulingPolicy, PlacementPolicy};
use pageserver_api::models::TenantConfig; use pageserver_api::models::TenantConfig;
use pageserver_api::shard::ShardConfigError; use pageserver_api::shard::ShardConfigError;
@@ -80,7 +78,7 @@ pub(crate) enum DatabaseError {
Logical(String), Logical(String),
} }
#[derive(measured::FixedCardinalityLabel, Copy, Clone)] #[derive(measured::FixedCardinalityLabel, Clone)]
pub(crate) enum DatabaseOperation { pub(crate) enum DatabaseOperation {
InsertNode, InsertNode,
UpdateNode, UpdateNode,
@@ -109,12 +107,6 @@ pub(crate) enum AbortShardSplitStatus {
pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>; pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;
/// Some methods can operate on either a whole tenant or a single shard
pub(crate) enum TenantFilter {
Tenant(TenantId),
Shard(TenantShardId),
}
impl Persistence { impl Persistence {
// The default postgres connection limit is 100. We use up to 99, to leave one free for a human admin under // The default postgres connection limit is 100. We use up to 99, to leave one free for a human admin under
// normal circumstances. This assumes we have exclusive use of the database cluster to which we connect. // normal circumstances. This assumes we have exclusive use of the database cluster to which we connect.
@@ -145,41 +137,18 @@ impl Persistence {
} }
} }
/// A helper for use during startup, where we would like to tolerate concurrent restarts of the
/// database and the storage controller, therefore the database might not be available right away
pub async fn await_connection(
database_url: &str,
timeout: Duration,
) -> Result<(), diesel::ConnectionError> {
let started_at = Instant::now();
loop {
match PgConnection::establish(database_url) {
Ok(_) => {
tracing::info!("Connected to database.");
return Ok(());
}
Err(e) => {
if started_at.elapsed() > timeout {
return Err(e);
} else {
tracing::info!("Database not yet available, waiting... ({e})");
tokio::time::sleep(Duration::from_millis(100)).await;
}
}
}
}
}
/// Wraps `with_conn` in order to collect latency and error metrics /// Wraps `with_conn` in order to collect latency and error metrics
async fn with_measured_conn<F, R>(&self, op: DatabaseOperation, func: F) -> DatabaseResult<R> async fn with_measured_conn<F, R>(&self, op: DatabaseOperation, func: F) -> DatabaseResult<R>
where where
F: FnOnce(&mut PgConnection) -> DatabaseResult<R> + Send + 'static, F: Fn(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
R: Send + 'static, R: Send + 'static,
{ {
let latency = &METRICS_REGISTRY let latency = &METRICS_REGISTRY
.metrics_group .metrics_group
.storage_controller_database_query_latency; .storage_controller_database_query_latency;
let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op }); let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup {
operation: op.clone(),
});
let res = self.with_conn(func).await; let res = self.with_conn(func).await;
@@ -199,7 +168,7 @@ impl Persistence {
/// Call the provided function in a tokio blocking thread, with a Diesel database connection. /// Call the provided function in a tokio blocking thread, with a Diesel database connection.
async fn with_conn<F, R>(&self, func: F) -> DatabaseResult<R> async fn with_conn<F, R>(&self, func: F) -> DatabaseResult<R>
where where
F: FnOnce(&mut PgConnection) -> DatabaseResult<R> + Send + 'static, F: Fn(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
R: Send + 'static, R: Send + 'static,
{ {
let mut conn = self.connection_pool.get()?; let mut conn = self.connection_pool.get()?;
@@ -306,11 +275,6 @@ impl Persistence {
// Backward compat for test data after PR https://github.com/neondatabase/neon/pull/7165 // Backward compat for test data after PR https://github.com/neondatabase/neon/pull/7165
shard.placement_policy = "{\"Attached\":0}".to_string(); shard.placement_policy = "{\"Attached\":0}".to_string();
} }
if shard.scheduling_policy.is_empty() {
shard.scheduling_policy =
serde_json::to_string(&ShardSchedulingPolicy::default()).unwrap();
}
} }
let tenants: Vec<TenantShardPersistence> = decoded.tenants.into_values().collect(); let tenants: Vec<TenantShardPersistence> = decoded.tenants.into_values().collect();
@@ -501,45 +465,59 @@ impl Persistence {
/// that we only do the first time a tenant is set to an attached policy via /location_config. /// that we only do the first time a tenant is set to an attached policy via /location_config.
pub(crate) async fn update_tenant_shard( pub(crate) async fn update_tenant_shard(
&self, &self,
tenant: TenantFilter, tenant_shard_id: TenantShardId,
input_placement_policy: Option<PlacementPolicy>, input_placement_policy: PlacementPolicy,
input_config: Option<TenantConfig>, input_config: TenantConfig,
input_generation: Option<Generation>, input_generation: Option<Generation>,
input_scheduling_policy: Option<ShardSchedulingPolicy>,
) -> DatabaseResult<()> { ) -> DatabaseResult<()> {
use crate::schema::tenant_shards::dsl::*; use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| { self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
let query = match tenant { let query = diesel::update(tenant_shards)
TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards) .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
.filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string())) .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
.filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32)) .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32));
.filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
.into_boxed(),
TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
.filter(tenant_id.eq(input_tenant_id.to_string()))
.into_boxed(),
};
#[derive(AsChangeset)] if let Some(input_generation) = input_generation {
#[diesel(table_name = crate::schema::tenant_shards)] // Update includes generation column
struct ShardUpdate { query
generation: Option<i32>, .set((
placement_policy: Option<String>, generation.eq(Some(input_generation.into().unwrap() as i32)),
config: Option<String>, placement_policy
scheduling_policy: Option<String>, .eq(serde_json::to_string(&input_placement_policy).unwrap()),
config.eq(serde_json::to_string(&input_config).unwrap()),
))
.execute(conn)?;
} else {
// Update does not include generation column
query
.set((
placement_policy
.eq(serde_json::to_string(&input_placement_policy).unwrap()),
config.eq(serde_json::to_string(&input_config).unwrap()),
))
.execute(conn)?;
} }
let update = ShardUpdate { Ok(())
generation: input_generation.map(|g| g.into().unwrap() as i32), })
placement_policy: input_placement_policy .await?;
.map(|p| serde_json::to_string(&p).unwrap()),
config: input_config.map(|c| serde_json::to_string(&c).unwrap()),
scheduling_policy: input_scheduling_policy
.map(|p| serde_json::to_string(&p).unwrap()),
};
query.set(update).execute(conn)?; Ok(())
}
pub(crate) async fn update_tenant_config(
&self,
input_tenant_id: TenantId,
input_config: TenantConfig,
) -> DatabaseResult<()> {
use crate::schema::tenant_shards::dsl::*;
self.with_measured_conn(DatabaseOperation::UpdateTenantConfig, move |conn| {
diesel::update(tenant_shards)
.filter(tenant_id.eq(input_tenant_id.to_string()))
.set((config.eq(serde_json::to_string(&input_config).unwrap()),))
.execute(conn)?;
Ok(()) Ok(())
}) })
@@ -720,7 +698,7 @@ impl Persistence {
} }
} }
/// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably /// Parts of [`crate::tenant_state::TenantState`] that are stored durably
#[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)] #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
#[diesel(table_name = crate::schema::tenant_shards)] #[diesel(table_name = crate::schema::tenant_shards)]
pub(crate) struct TenantShardPersistence { pub(crate) struct TenantShardPersistence {
@@ -750,8 +728,6 @@ pub(crate) struct TenantShardPersistence {
pub(crate) splitting: SplitState, pub(crate) splitting: SplitState,
#[serde(default)] #[serde(default)]
pub(crate) config: String, pub(crate) config: String,
#[serde(default)]
pub(crate) scheduling_policy: String,
} }
impl TenantShardPersistence { impl TenantShardPersistence {

View File

@@ -1,12 +1,12 @@
use crate::pageserver_client::PageserverClient; use crate::pageserver_client::PageserverClient;
use crate::persistence::Persistence; use crate::persistence::Persistence;
use crate::service; use crate::service;
use hyper::StatusCode;
use pageserver_api::models::{ use pageserver_api::models::{
LocationConfig, LocationConfigMode, LocationConfigSecondary, TenantConfig, LocationConfig, LocationConfigMode, LocationConfigSecondary, TenantConfig,
}; };
use pageserver_api::shard::{ShardIdentity, TenantShardId}; use pageserver_api::shard::{ShardIdentity, TenantShardId};
use pageserver_client::mgmt_api; use pageserver_client::mgmt_api;
use reqwest::StatusCode;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
@@ -18,14 +18,14 @@ use utils::sync::gate::GateGuard;
use crate::compute_hook::{ComputeHook, NotifyError}; use crate::compute_hook::{ComputeHook, NotifyError};
use crate::node::Node; use crate::node::Node;
use crate::tenant_shard::{IntentState, ObservedState, ObservedStateLocation}; use crate::tenant_state::{IntentState, ObservedState, ObservedStateLocation};
const DEFAULT_HEATMAP_PERIOD: &str = "60s"; const DEFAULT_HEATMAP_PERIOD: &str = "60s";
/// Object with the lifetime of the background reconcile task that is created /// Object with the lifetime of the background reconcile task that is created
/// for tenants which have a difference between their intent and observed states. /// for tenants which have a difference between their intent and observed states.
pub(super) struct Reconciler { pub(super) struct Reconciler {
/// See [`crate::tenant_shard::TenantShard`] for the meanings of these fields: they are a snapshot /// See [`crate::tenant_state::TenantState`] for the meanings of these fields: they are a snapshot
/// of a tenant's state from when we spawned a reconcile task. /// of a tenant's state from when we spawned a reconcile task.
pub(super) tenant_shard_id: TenantShardId, pub(super) tenant_shard_id: TenantShardId,
pub(crate) shard: ShardIdentity, pub(crate) shard: ShardIdentity,
@@ -48,15 +48,11 @@ pub(super) struct Reconciler {
/// To avoid stalling if the cloud control plane is unavailable, we may proceed /// To avoid stalling if the cloud control plane is unavailable, we may proceed
/// past failures in [`ComputeHook::notify`], but we _must_ remember that we failed /// past failures in [`ComputeHook::notify`], but we _must_ remember that we failed
/// so that we can set [`crate::tenant_shard::TenantShard::pending_compute_notification`] to ensure a later retry. /// so that we can set [`crate::tenant_state::TenantState::pending_compute_notification`] to ensure a later retry.
pub(crate) compute_notify_failure: bool, pub(crate) compute_notify_failure: bool,
/// Reconciler is responsible for keeping alive semaphore units that limit concurrency on how many
/// we will spawn.
pub(crate) _resource_units: ReconcileUnits,
/// A means to abort background reconciliation: it is essential to /// A means to abort background reconciliation: it is essential to
/// call this when something changes in the original TenantShard that /// call this when something changes in the original TenantState that
/// will make this reconciliation impossible or unnecessary, for /// will make this reconciliation impossible or unnecessary, for
/// example when a pageserver node goes offline, or the PlacementPolicy for /// example when a pageserver node goes offline, or the PlacementPolicy for
/// the tenant is changed. /// the tenant is changed.
@@ -70,20 +66,7 @@ pub(super) struct Reconciler {
pub(crate) persistence: Arc<Persistence>, pub(crate) persistence: Arc<Persistence>,
} }
/// RAII resource units granted to a Reconciler, which it should keep alive until it finishes doing I/O /// This is a snapshot of [`crate::tenant_state::IntentState`], but it does not do any
pub(crate) struct ReconcileUnits {
_sem_units: tokio::sync::OwnedSemaphorePermit,
}
impl ReconcileUnits {
pub(crate) fn new(sem_units: tokio::sync::OwnedSemaphorePermit) -> Self {
Self {
_sem_units: sem_units,
}
}
}
/// This is a snapshot of [`crate::tenant_shard::IntentState`], but it does not do any
/// reference counting for Scheduler. The IntentState is what the scheduler works with, /// reference counting for Scheduler. The IntentState is what the scheduler works with,
/// and the TargetState is just the instruction for a particular Reconciler run. /// and the TargetState is just the instruction for a particular Reconciler run.
#[derive(Debug)] #[derive(Debug)]
@@ -504,7 +487,6 @@ impl Reconciler {
while let Err(e) = self.compute_notify().await { while let Err(e) = self.compute_notify().await {
match e { match e {
NotifyError::Fatal(_) => return Err(ReconcileError::Notify(e)), NotifyError::Fatal(_) => return Err(ReconcileError::Notify(e)),
NotifyError::ShuttingDown => return Err(ReconcileError::Cancel),
_ => { _ => {
tracing::warn!( tracing::warn!(
"Live migration blocked by compute notification error, retrying: {e}" "Live migration blocked by compute notification error, retrying: {e}"
@@ -767,10 +749,7 @@ impl Reconciler {
// It is up to the caller whether they want to drop out on this error, but they don't have to: // It is up to the caller whether they want to drop out on this error, but they don't have to:
// in general we should avoid letting unavailability of the cloud control plane stop us from // in general we should avoid letting unavailability of the cloud control plane stop us from
// making progress. // making progress.
if !matches!(e, NotifyError::ShuttingDown) { tracing::warn!("Failed to notify compute of attached pageserver {node}: {e}");
tracing::warn!("Failed to notify compute of attached pageserver {node}: {e}");
}
// Set this flag so that in our ReconcileResult we will set the flag on the shard that it // Set this flag so that in our ReconcileResult we will set the flag on the shard that it
// needs to retry at some point. // needs to retry at some point.
self.compute_notify_failure = true; self.compute_notify_failure = true;

View File

@@ -1,4 +1,4 @@
use crate::{node::Node, tenant_shard::TenantShard}; use crate::{node::Node, tenant_state::TenantState};
use pageserver_api::controller_api::UtilizationScore; use pageserver_api::controller_api::UtilizationScore;
use serde::Serialize; use serde::Serialize;
use std::collections::HashMap; use std::collections::HashMap;
@@ -27,7 +27,7 @@ pub enum MaySchedule {
#[derive(Serialize)] #[derive(Serialize)]
struct SchedulerNode { struct SchedulerNode {
/// How many shards are currently scheduled on this node, via their [`crate::tenant_shard::IntentState`]. /// How many shards are currently scheduled on this node, via their [`crate::tenant_state::IntentState`].
shard_count: usize, shard_count: usize,
/// Whether this node is currently elegible to have new shards scheduled (this is derived /// Whether this node is currently elegible to have new shards scheduled (this is derived
@@ -58,86 +58,6 @@ pub(crate) struct Scheduler {
nodes: HashMap<NodeId, SchedulerNode>, nodes: HashMap<NodeId, SchedulerNode>,
} }
/// Score for soft constraint scheduling: lower scores are preferred to higher scores.
///
/// For example, we may set an affinity score based on the number of shards from the same
/// tenant already on a node, to implicitly prefer to balance out shards.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub(crate) struct AffinityScore(pub(crate) usize);
impl AffinityScore {
/// If we have no anti-affinity at all toward a node, this is its score. It means
/// the scheduler has a free choice amongst nodes with this score, and may pick a node
/// based on other information such as total utilization.
pub(crate) const FREE: Self = Self(0);
pub(crate) fn inc(&mut self) {
self.0 += 1;
}
}
impl std::ops::Add for AffinityScore {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Self(self.0 + rhs.0)
}
}
/// Hint for whether this is a sincere attempt to schedule, or a speculative
/// check for where we _would_ schedule (done during optimization)
#[derive(Debug)]
pub(crate) enum ScheduleMode {
Normal,
Speculative,
}
impl Default for ScheduleMode {
fn default() -> Self {
Self::Normal
}
}
// For carrying state between multiple calls to [`TenantShard::schedule`], e.g. when calling
// it for many shards in the same tenant.
#[derive(Debug, Default)]
pub(crate) struct ScheduleContext {
/// Sparse map of nodes: omitting a node implicitly makes its affinity [`AffinityScore::FREE`]
pub(crate) nodes: HashMap<NodeId, AffinityScore>,
/// Specifically how many _attached_ locations are on each node
pub(crate) attached_nodes: HashMap<NodeId, usize>,
pub(crate) mode: ScheduleMode,
}
impl ScheduleContext {
/// Input is a list of nodes we would like to avoid using again within this context. The more
/// times a node is passed into this call, the less inclined we are to use it.
pub(crate) fn avoid(&mut self, nodes: &[NodeId]) {
for node_id in nodes {
let entry = self.nodes.entry(*node_id).or_insert(AffinityScore::FREE);
entry.inc()
}
}
pub(crate) fn push_attached(&mut self, node_id: NodeId) {
let entry = self.attached_nodes.entry(node_id).or_default();
*entry += 1;
}
pub(crate) fn get_node_affinity(&self, node_id: NodeId) -> AffinityScore {
self.nodes
.get(&node_id)
.copied()
.unwrap_or(AffinityScore::FREE)
}
pub(crate) fn get_node_attachments(&self, node_id: NodeId) -> usize {
self.attached_nodes.get(&node_id).copied().unwrap_or(0)
}
}
impl Scheduler { impl Scheduler {
pub(crate) fn new<'a>(nodes: impl Iterator<Item = &'a Node>) -> Self { pub(crate) fn new<'a>(nodes: impl Iterator<Item = &'a Node>) -> Self {
let mut scheduler_nodes = HashMap::new(); let mut scheduler_nodes = HashMap::new();
@@ -163,7 +83,7 @@ impl Scheduler {
pub(crate) fn consistency_check<'a>( pub(crate) fn consistency_check<'a>(
&self, &self,
nodes: impl Iterator<Item = &'a Node>, nodes: impl Iterator<Item = &'a Node>,
shards: impl Iterator<Item = &'a TenantShard>, shards: impl Iterator<Item = &'a TenantState>,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let mut expect_nodes: HashMap<NodeId, SchedulerNode> = HashMap::new(); let mut expect_nodes: HashMap<NodeId, SchedulerNode> = HashMap::new();
for node in nodes { for node in nodes {
@@ -304,87 +224,53 @@ impl Scheduler {
node.and_then(|(node_id, may_schedule)| if may_schedule { Some(node_id) } else { None }) node.and_then(|(node_id, may_schedule)| if may_schedule { Some(node_id) } else { None })
} }
/// hard_exclude: it is forbidden to use nodes in this list, typically becacuse they pub(crate) fn schedule_shard(&self, hard_exclude: &[NodeId]) -> Result<NodeId, ScheduleError> {
/// are already in use by this shard -- we use this to avoid picking the same node
/// as both attached and secondary location. This is a hard constraint: if we cannot
/// find any nodes that aren't in this list, then we will return a [`ScheduleError::ImpossibleConstraint`].
///
/// context: we prefer to avoid using nodes identified in the context, according
/// to their anti-affinity score. We use this to prefeer to avoid placing shards in
/// the same tenant on the same node. This is a soft constraint: the context will never
/// cause us to fail to schedule a shard.
pub(crate) fn schedule_shard(
&self,
hard_exclude: &[NodeId],
context: &ScheduleContext,
) -> Result<NodeId, ScheduleError> {
if self.nodes.is_empty() { if self.nodes.is_empty() {
return Err(ScheduleError::NoPageservers); return Err(ScheduleError::NoPageservers);
} }
let mut scores: Vec<(NodeId, AffinityScore, usize)> = self let mut tenant_counts: Vec<(NodeId, usize)> = self
.nodes .nodes
.iter() .iter()
.filter_map(|(k, v)| { .filter_map(|(k, v)| {
if hard_exclude.contains(k) || v.may_schedule == MaySchedule::No { if hard_exclude.contains(k) || v.may_schedule == MaySchedule::No {
None None
} else { } else {
Some(( Some((*k, v.shard_count))
*k,
context.nodes.get(k).copied().unwrap_or(AffinityScore::FREE),
v.shard_count,
))
} }
}) })
.collect(); .collect();
// Sort by, in order of precedence: // Sort by tenant count. Nodes with the same tenant count are sorted by ID.
// 1st: Affinity score. We should never pick a higher-score node if a lower-score node is available tenant_counts.sort_by_key(|i| (i.1, i.0));
// 2nd: Utilization. Within nodes with the same affinity, use the least loaded nodes.
// 3rd: Node ID. This is a convenience to make selection deterministic in tests and empty systems.
scores.sort_by_key(|i| (i.1, i.2, i.0));
if scores.is_empty() { if tenant_counts.is_empty() {
// After applying constraints, no pageservers were left. // After applying constraints, no pageservers were left. We log some detail about
if !matches!(context.mode, ScheduleMode::Speculative) { // the state of nodes to help understand why this happened. This is not logged as an error because
// If this was not a speculative attempt, log details to understand why we couldn't // it is legitimately possible for enough nodes to be Offline to prevent scheduling a shard.
// schedule: this may help an engineer understand if some nodes are marked offline tracing::info!("Scheduling failure, while excluding {hard_exclude:?}, node states:");
// in a way that's preventing progress. for (node_id, node) in &self.nodes {
tracing::info!( tracing::info!(
"Scheduling failure, while excluding {hard_exclude:?}, node states:" "Node {node_id}: may_schedule={} shards={}",
node.may_schedule != MaySchedule::No,
node.shard_count
); );
for (node_id, node) in &self.nodes {
tracing::info!(
"Node {node_id}: may_schedule={} shards={}",
node.may_schedule != MaySchedule::No,
node.shard_count
);
}
} }
return Err(ScheduleError::ImpossibleConstraint); return Err(ScheduleError::ImpossibleConstraint);
} }
// Lowest score wins let node_id = tenant_counts.first().unwrap().0;
let node_id = scores.first().unwrap().0; tracing::info!(
"scheduler selected node {node_id} (elegible nodes {:?}, exclude: {hard_exclude:?})",
if !matches!(context.mode, ScheduleMode::Speculative) { tenant_counts.iter().map(|i| i.0 .0).collect::<Vec<_>>()
tracing::info!(
"scheduler selected node {node_id} (elegible nodes {:?}, hard exclude: {hard_exclude:?}, soft exclude: {context:?})",
scores.iter().map(|i| i.0 .0).collect::<Vec<_>>()
); );
}
// Note that we do not update shard count here to reflect the scheduling: that // Note that we do not update shard count here to reflect the scheduling: that
// is IntentState's job when the scheduled location is used. // is IntentState's job when the scheduled location is used.
Ok(node_id) Ok(node_id)
} }
/// Unit test access to internal state
#[cfg(test)]
pub(crate) fn get_node_shard_count(&self, node_id: NodeId) -> usize {
self.nodes.get(&node_id).unwrap().shard_count
}
} }
#[cfg(test)] #[cfg(test)]
@@ -421,7 +307,7 @@ pub(crate) mod test_utils {
mod tests { mod tests {
use super::*; use super::*;
use crate::tenant_shard::IntentState; use crate::tenant_state::IntentState;
#[test] #[test]
fn scheduler_basic() -> anyhow::Result<()> { fn scheduler_basic() -> anyhow::Result<()> {
let nodes = test_utils::make_test_nodes(2); let nodes = test_utils::make_test_nodes(2);
@@ -430,17 +316,15 @@ mod tests {
let mut t1_intent = IntentState::new(); let mut t1_intent = IntentState::new();
let mut t2_intent = IntentState::new(); let mut t2_intent = IntentState::new();
let context = ScheduleContext::default(); let scheduled = scheduler.schedule_shard(&[])?;
let scheduled = scheduler.schedule_shard(&[], &context)?;
t1_intent.set_attached(&mut scheduler, Some(scheduled)); t1_intent.set_attached(&mut scheduler, Some(scheduled));
let scheduled = scheduler.schedule_shard(&[], &context)?; let scheduled = scheduler.schedule_shard(&[])?;
t2_intent.set_attached(&mut scheduler, Some(scheduled)); t2_intent.set_attached(&mut scheduler, Some(scheduled));
assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1); assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1);
assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 1); assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 1);
let scheduled = scheduler.schedule_shard(&t1_intent.all_pageservers(), &context)?; let scheduled = scheduler.schedule_shard(&t1_intent.all_pageservers())?;
t1_intent.push_secondary(&mut scheduler, scheduled); t1_intent.push_secondary(&mut scheduler, scheduled);
assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1); assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1);

View File

@@ -22,7 +22,6 @@ diesel::table! {
placement_policy -> Varchar, placement_policy -> Varchar,
splitting -> Int2, splitting -> Int2,
config -> Text, config -> Text,
scheduling_policy -> Varchar,
} }
} }

View File

@@ -86,10 +86,7 @@ where
.stdout(process_log_file) .stdout(process_log_file)
.stderr(same_file_for_stderr) .stderr(same_file_for_stderr)
.args(args); .args(args);
let filled_cmd = fill_remote_storage_secrets_vars(fill_rust_env_vars(background_command));
let filled_cmd = fill_env_vars_prefixed_neon(fill_remote_storage_secrets_vars(
fill_rust_env_vars(background_command),
));
filled_cmd.envs(envs); filled_cmd.envs(envs);
let pid_file_to_check = match &initial_pid_file { let pid_file_to_check = match &initial_pid_file {
@@ -271,15 +268,6 @@ fn fill_remote_storage_secrets_vars(mut cmd: &mut Command) -> &mut Command {
cmd cmd
} }
fn fill_env_vars_prefixed_neon(mut cmd: &mut Command) -> &mut Command {
for (var, val) in std::env::vars() {
if var.starts_with("NEON_PAGESERVER_") {
cmd = cmd.env(var, val);
}
}
cmd
}
/// Add a `pre_exec` to the cmd that, inbetween fork() and exec(), /// Add a `pre_exec` to the cmd that, inbetween fork() and exec(),
/// 1. Claims a pidfile with a fcntl lock on it and /// 1. Claims a pidfile with a fcntl lock on it and
/// 2. Sets up the pidfile's file descriptor so that it (and the lock) /// 2. Sets up the pidfile's file descriptor so that it (and the lock)

View File

@@ -14,7 +14,9 @@ use control_plane::pageserver::{PageServerNode, PAGESERVER_REMOTE_STORAGE_DIR};
use control_plane::safekeeper::SafekeeperNode; use control_plane::safekeeper::SafekeeperNode;
use control_plane::storage_controller::StorageController; use control_plane::storage_controller::StorageController;
use control_plane::{broker, local_env}; use control_plane::{broker, local_env};
use pageserver_api::controller_api::PlacementPolicy; use pageserver_api::controller_api::{
NodeAvailability, NodeConfigureRequest, NodeSchedulingPolicy, PlacementPolicy,
};
use pageserver_api::models::{ use pageserver_api::models::{
ShardParameters, TenantCreateRequest, TimelineCreateRequest, TimelineInfo, ShardParameters, TenantCreateRequest, TimelineCreateRequest, TimelineInfo,
}; };
@@ -417,54 +419,6 @@ async fn handle_tenant(
println!("{} {:?}", t.id, t.state); println!("{} {:?}", t.id, t.state);
} }
} }
Some(("import", import_match)) => {
let tenant_id = parse_tenant_id(import_match)?.unwrap_or_else(TenantId::generate);
let storage_controller = StorageController::from_env(env);
let create_response = storage_controller.tenant_import(tenant_id).await?;
let shard_zero = create_response
.shards
.first()
.expect("Import response omitted shards");
let attached_pageserver_id = shard_zero.node_id;
let pageserver =
PageServerNode::from_env(env, env.get_pageserver_conf(attached_pageserver_id)?);
println!(
"Imported tenant {tenant_id}, attached to pageserver {attached_pageserver_id}"
);
let timelines = pageserver
.http_client
.list_timelines(shard_zero.shard_id)
.await?;
// Pick a 'main' timeline that has no ancestors, the rest will get arbitrary names
let main_timeline = timelines
.iter()
.find(|t| t.ancestor_timeline_id.is_none())
.expect("No timelines found")
.timeline_id;
let mut branch_i = 0;
for timeline in timelines.iter() {
let branch_name = if timeline.timeline_id == main_timeline {
"main".to_string()
} else {
branch_i += 1;
format!("branch_{branch_i}")
};
println!(
"Importing timeline {tenant_id}/{} as branch {branch_name}",
timeline.timeline_id
);
env.register_branch_mapping(branch_name, tenant_id, timeline.timeline_id)?;
}
}
Some(("create", create_match)) => { Some(("create", create_match)) => {
let tenant_conf: HashMap<_, _> = create_match let tenant_conf: HashMap<_, _> = create_match
.get_many::<String>("config") .get_many::<String>("config")
@@ -1106,6 +1060,21 @@ async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) ->
} }
} }
Some(("set-state", subcommand_args)) => {
let pageserver = get_pageserver(env, subcommand_args)?;
let scheduling = subcommand_args.get_one("scheduling");
let availability = subcommand_args.get_one("availability");
let storage_controller = StorageController::from_env(env);
storage_controller
.node_configure(NodeConfigureRequest {
node_id: pageserver.conf.id,
scheduling: scheduling.cloned(),
availability: availability.cloned(),
})
.await?;
}
Some(("status", subcommand_args)) => { Some(("status", subcommand_args)) => {
match get_pageserver(env, subcommand_args)?.check_status().await { match get_pageserver(env, subcommand_args)?.check_status().await {
Ok(_) => println!("Page server is up and running"), Ok(_) => println!("Page server is up and running"),
@@ -1279,7 +1248,7 @@ async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) {
match ComputeControlPlane::load(env.clone()) { match ComputeControlPlane::load(env.clone()) {
Ok(cplane) => { Ok(cplane) => {
for (_k, node) in cplane.endpoints { for (_k, node) in cplane.endpoints {
if let Err(e) = node.stop(if immediate { "immediate" } else { "fast" }, false) { if let Err(e) = node.stop(if immediate { "immediate" } else { "fast " }, false) {
eprintln!("postgres stop failed: {e:#}"); eprintln!("postgres stop failed: {e:#}");
} }
} }
@@ -1465,7 +1434,6 @@ fn cli() -> Command {
.subcommand( .subcommand(
Command::new("timeline") Command::new("timeline")
.about("Manage timelines") .about("Manage timelines")
.arg_required_else_help(true)
.subcommand(Command::new("list") .subcommand(Command::new("list")
.about("List all timelines, available to this pageserver") .about("List all timelines, available to this pageserver")
.arg(tenant_id_arg.clone())) .arg(tenant_id_arg.clone()))
@@ -1528,8 +1496,6 @@ fn cli() -> Command {
.subcommand(Command::new("config") .subcommand(Command::new("config")
.arg(tenant_id_arg.clone()) .arg(tenant_id_arg.clone())
.arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false))) .arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false)))
.subcommand(Command::new("import").arg(tenant_id_arg.clone().required(true))
.about("Import a tenant that is present in remote storage, and create branches for its timelines"))
) )
.subcommand( .subcommand(
Command::new("pageserver") Command::new("pageserver")
@@ -1549,13 +1515,19 @@ fn cli() -> Command {
.about("Restart local pageserver") .about("Restart local pageserver")
.arg(pageserver_config_args.clone()) .arg(pageserver_config_args.clone())
) )
.subcommand(Command::new("set-state")
.arg(Arg::new("availability").value_parser(value_parser!(NodeAvailability)).long("availability").action(ArgAction::Set).help("Availability state: offline,active"))
.arg(Arg::new("scheduling").value_parser(value_parser!(NodeSchedulingPolicy)).long("scheduling").action(ArgAction::Set).help("Scheduling state: draining,pause,filling,active"))
.about("Set scheduling or availability state of pageserver node")
.arg(pageserver_config_args.clone())
)
) )
.subcommand( .subcommand(
Command::new("storage_controller") Command::new("storage_controller")
.arg_required_else_help(true) .arg_required_else_help(true)
.about("Manage storage_controller") .about("Manage storage_controller")
.subcommand(Command::new("start").about("Start storage controller")) .subcommand(Command::new("start").about("Start local pageserver").arg(pageserver_config_args.clone()))
.subcommand(Command::new("stop").about("Stop storage controller") .subcommand(Command::new("stop").about("Stop local pageserver")
.arg(stop_mode_arg.clone())) .arg(stop_mode_arg.clone()))
) )
.subcommand( .subcommand(

View File

@@ -17,7 +17,6 @@ use std::net::Ipv4Addr;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process::{Command, Stdio}; use std::process::{Command, Stdio};
use std::time::Duration;
use utils::{ use utils::{
auth::{encode_from_key_file, Claims}, auth::{encode_from_key_file, Claims},
id::{NodeId, TenantId, TenantTimelineId, TimelineId}, id::{NodeId, TenantId, TenantTimelineId, TimelineId},
@@ -67,10 +66,6 @@ pub struct LocalEnv {
pub broker: NeonBroker, pub broker: NeonBroker,
// Configuration for the storage controller (1 per neon_local environment)
#[serde(default)]
pub storage_controller: NeonStorageControllerConf,
/// This Vec must always contain at least one pageserver /// This Vec must always contain at least one pageserver
pub pageservers: Vec<PageServerConf>, pub pageservers: Vec<PageServerConf>,
@@ -103,29 +98,6 @@ pub struct NeonBroker {
pub listen_addr: SocketAddr, pub listen_addr: SocketAddr,
} }
/// Broker config for cluster internal communication.
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
#[serde(default)]
pub struct NeonStorageControllerConf {
/// Heartbeat timeout before marking a node offline
#[serde(with = "humantime_serde")]
pub max_unavailable: Duration,
}
impl NeonStorageControllerConf {
// Use a shorter pageserver unavailability interval than the default to speed up tests.
const DEFAULT_MAX_UNAVAILABLE_INTERVAL: std::time::Duration =
std::time::Duration::from_secs(10);
}
impl Default for NeonStorageControllerConf {
fn default() -> Self {
Self {
max_unavailable: Self::DEFAULT_MAX_UNAVAILABLE_INTERVAL,
}
}
}
// Dummy Default impl to satisfy Deserialize derive. // Dummy Default impl to satisfy Deserialize derive.
impl Default for NeonBroker { impl Default for NeonBroker {
fn default() -> Self { fn default() -> Self {
@@ -157,8 +129,6 @@ pub struct PageServerConf {
pub(crate) virtual_file_io_engine: Option<String>, pub(crate) virtual_file_io_engine: Option<String>,
pub(crate) get_vectored_impl: Option<String>, pub(crate) get_vectored_impl: Option<String>,
pub(crate) get_impl: Option<String>,
pub(crate) validate_vectored_get: Option<bool>,
} }
impl Default for PageServerConf { impl Default for PageServerConf {
@@ -171,8 +141,6 @@ impl Default for PageServerConf {
http_auth_type: AuthType::Trust, http_auth_type: AuthType::Trust,
virtual_file_io_engine: None, virtual_file_io_engine: None,
get_vectored_impl: None, get_vectored_impl: None,
get_impl: None,
validate_vectored_get: None,
} }
} }
} }
@@ -188,7 +156,6 @@ pub struct SafekeeperConf {
pub remote_storage: Option<String>, pub remote_storage: Option<String>,
pub backup_threads: Option<u32>, pub backup_threads: Option<u32>,
pub auth_enabled: bool, pub auth_enabled: bool,
pub listen_addr: Option<String>,
} }
impl Default for SafekeeperConf { impl Default for SafekeeperConf {
@@ -202,7 +169,6 @@ impl Default for SafekeeperConf {
remote_storage: None, remote_storage: None,
backup_threads: None, backup_threads: None,
auth_enabled: false, auth_enabled: false,
listen_addr: None,
} }
} }
} }

View File

@@ -92,8 +92,6 @@ impl PageServerNode {
http_auth_type, http_auth_type,
virtual_file_io_engine, virtual_file_io_engine,
get_vectored_impl, get_vectored_impl,
get_impl,
validate_vectored_get,
} = &self.conf; } = &self.conf;
let id = format!("id={}", id); let id = format!("id={}", id);
@@ -113,16 +111,6 @@ impl PageServerNode {
} else { } else {
String::new() String::new()
}; };
let get_impl = if let Some(get_impl) = get_impl {
format!("get_impl='{get_impl}'")
} else {
String::new()
};
let validate_vectored_get = if let Some(validate_vectored_get) = validate_vectored_get {
format!("validate_vectored_get={validate_vectored_get}")
} else {
String::new()
};
let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url()); let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url());
@@ -136,8 +124,6 @@ impl PageServerNode {
broker_endpoint_param, broker_endpoint_param,
virtual_file_io_engine, virtual_file_io_engine,
get_vectored_impl, get_vectored_impl,
get_impl,
validate_vectored_get,
]; ];
if let Some(control_plane_api) = &self.env.control_plane_api { if let Some(control_plane_api) = &self.env.control_plane_api {
@@ -403,10 +389,6 @@ impl PageServerNode {
.remove("image_creation_threshold") .remove("image_creation_threshold")
.map(|x| x.parse::<usize>()) .map(|x| x.parse::<usize>())
.transpose()?, .transpose()?,
image_layer_creation_check_threshold: settings
.remove("image_layer_creation_check_threshold")
.map(|x| x.parse::<u8>())
.transpose()?,
pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()), pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
walreceiver_connect_timeout: settings walreceiver_connect_timeout: settings
.remove("walreceiver_connect_timeout") .remove("walreceiver_connect_timeout")
@@ -448,11 +430,6 @@ impl PageServerNode {
.map(serde_json::from_str) .map(serde_json::from_str)
.transpose() .transpose()
.context("parse `timeline_get_throttle` from json")?, .context("parse `timeline_get_throttle` from json")?,
switch_to_aux_file_v2: settings
.remove("switch_to_aux_file_v2")
.map(|x| x.parse::<bool>())
.transpose()
.context("Failed to parse 'switch_to_aux_file_v2' as bool")?,
}; };
if !settings.is_empty() { if !settings.is_empty() {
bail!("Unrecognized tenant settings: {settings:?}") bail!("Unrecognized tenant settings: {settings:?}")
@@ -524,12 +501,6 @@ impl PageServerNode {
.map(|x| x.parse::<usize>()) .map(|x| x.parse::<usize>())
.transpose() .transpose()
.context("Failed to parse 'image_creation_threshold' as non zero integer")?, .context("Failed to parse 'image_creation_threshold' as non zero integer")?,
image_layer_creation_check_threshold: settings
.remove("image_layer_creation_check_threshold")
.map(|x| x.parse::<u8>())
.transpose()
.context("Failed to parse 'image_creation_check_threshold' as integer")?,
pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()), pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
walreceiver_connect_timeout: settings walreceiver_connect_timeout: settings
.remove("walreceiver_connect_timeout") .remove("walreceiver_connect_timeout")
@@ -571,11 +542,6 @@ impl PageServerNode {
.map(serde_json::from_str) .map(serde_json::from_str)
.transpose() .transpose()
.context("parse `timeline_get_throttle` from json")?, .context("parse `timeline_get_throttle` from json")?,
switch_to_aux_file_v2: settings
.remove("switch_to_aux_file_v2")
.map(|x| x.parse::<bool>())
.transpose()
.context("Failed to parse 'switch_to_aux_file_v2' as bool")?,
} }
}; };

View File

@@ -70,31 +70,24 @@ pub struct SafekeeperNode {
pub pg_connection_config: PgConnectionConfig, pub pg_connection_config: PgConnectionConfig,
pub env: LocalEnv, pub env: LocalEnv,
pub http_client: reqwest::Client, pub http_client: reqwest::Client,
pub listen_addr: String,
pub http_base_url: String, pub http_base_url: String,
} }
impl SafekeeperNode { impl SafekeeperNode {
pub fn from_env(env: &LocalEnv, conf: &SafekeeperConf) -> SafekeeperNode { pub fn from_env(env: &LocalEnv, conf: &SafekeeperConf) -> SafekeeperNode {
let listen_addr = if let Some(ref listen_addr) = conf.listen_addr {
listen_addr.clone()
} else {
"127.0.0.1".to_string()
};
SafekeeperNode { SafekeeperNode {
id: conf.id, id: conf.id,
conf: conf.clone(), conf: conf.clone(),
pg_connection_config: Self::safekeeper_connection_config(&listen_addr, conf.pg_port), pg_connection_config: Self::safekeeper_connection_config(conf.pg_port),
env: env.clone(), env: env.clone(),
http_client: reqwest::Client::new(), http_client: reqwest::Client::new(),
http_base_url: format!("http://{}:{}/v1", listen_addr, conf.http_port), http_base_url: format!("http://127.0.0.1:{}/v1", conf.http_port),
listen_addr,
} }
} }
/// Construct libpq connection string for connecting to this safekeeper. /// Construct libpq connection string for connecting to this safekeeper.
fn safekeeper_connection_config(addr: &str, port: u16) -> PgConnectionConfig { fn safekeeper_connection_config(port: u16) -> PgConnectionConfig {
PgConnectionConfig::new_host_port(url::Host::parse(addr).unwrap(), port) PgConnectionConfig::new_host_port(url::Host::parse("127.0.0.1").unwrap(), port)
} }
pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf { pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
@@ -118,8 +111,8 @@ impl SafekeeperNode {
); );
io::stdout().flush().unwrap(); io::stdout().flush().unwrap();
let listen_pg = format!("{}:{}", self.listen_addr, self.conf.pg_port); let listen_pg = format!("127.0.0.1:{}", self.conf.pg_port);
let listen_http = format!("{}:{}", self.listen_addr, self.conf.http_port); let listen_http = format!("127.0.0.1:{}", self.conf.http_port);
let id = self.id; let id = self.id;
let datadir = self.datadir_path(); let datadir = self.datadir_path();
@@ -146,7 +139,7 @@ impl SafekeeperNode {
availability_zone, availability_zone,
]; ];
if let Some(pg_tenant_only_port) = self.conf.pg_tenant_only_port { if let Some(pg_tenant_only_port) = self.conf.pg_tenant_only_port {
let listen_pg_tenant_only = format!("{}:{}", self.listen_addr, pg_tenant_only_port); let listen_pg_tenant_only = format!("127.0.0.1:{}", pg_tenant_only_port);
args.extend(["--listen-pg-tenant-only".to_owned(), listen_pg_tenant_only]); args.extend(["--listen-pg-tenant-only".to_owned(), listen_pg_tenant_only]);
} }
if !self.conf.sync { if !self.conf.sync {

View File

@@ -1,8 +1,6 @@
use crate::{ use crate::{background_process, local_env::LocalEnv};
background_process,
local_env::{LocalEnv, NeonStorageControllerConf},
};
use camino::{Utf8Path, Utf8PathBuf}; use camino::{Utf8Path, Utf8PathBuf};
use hyper::Method;
use pageserver_api::{ use pageserver_api::{
controller_api::{ controller_api::{
NodeConfigureRequest, NodeRegisterRequest, TenantCreateResponse, TenantLocateResponse, NodeConfigureRequest, NodeRegisterRequest, TenantCreateResponse, TenantLocateResponse,
@@ -16,7 +14,6 @@ use pageserver_api::{
}; };
use pageserver_client::mgmt_api::ResponseErrorMessageExt; use pageserver_client::mgmt_api::ResponseErrorMessageExt;
use postgres_backend::AuthType; use postgres_backend::AuthType;
use reqwest::Method;
use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{fs, str::FromStr}; use std::{fs, str::FromStr};
use tokio::process::Command; use tokio::process::Command;
@@ -35,13 +32,15 @@ pub struct StorageController {
public_key: Option<String>, public_key: Option<String>,
postgres_port: u16, postgres_port: u16,
client: reqwest::Client, client: reqwest::Client,
config: NeonStorageControllerConf,
} }
const COMMAND: &str = "storage_controller"; const COMMAND: &str = "storage_controller";
const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16; const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16;
// Use a shorter pageserver unavailability interval than the default to speed up tests.
const NEON_LOCAL_MAX_UNAVAILABLE_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10);
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct AttachHookRequest { pub struct AttachHookRequest {
pub tenant_shard_id: TenantShardId, pub tenant_shard_id: TenantShardId,
@@ -136,7 +135,6 @@ impl StorageController {
client: reqwest::ClientBuilder::new() client: reqwest::ClientBuilder::new()
.build() .build()
.expect("Failed to construct http client"), .expect("Failed to construct http client"),
config: env.storage_controller.clone(),
} }
} }
@@ -274,6 +272,8 @@ impl StorageController {
// Run migrations on every startup, in case something changed. // Run migrations on every startup, in case something changed.
let database_url = self.setup_database().await?; let database_url = self.setup_database().await?;
let max_unavailable: humantime::Duration = NEON_LOCAL_MAX_UNAVAILABLE_INTERVAL.into();
let mut args = vec![ let mut args = vec![
"-l", "-l",
&self.listen, &self.listen,
@@ -283,7 +283,7 @@ impl StorageController {
"--database-url", "--database-url",
&database_url, &database_url,
"--max-unavailable-interval", "--max-unavailable-interval",
&humantime::Duration::from(self.config.max_unavailable).to_string(), &max_unavailable.to_string(),
] ]
.into_iter() .into_iter()
.map(|s| s.to_string()) .map(|s| s.to_string())
@@ -379,7 +379,7 @@ impl StorageController {
/// Simple HTTP request wrapper for calling into storage controller /// Simple HTTP request wrapper for calling into storage controller
async fn dispatch<RQ, RS>( async fn dispatch<RQ, RS>(
&self, &self,
method: reqwest::Method, method: hyper::Method,
path: String, path: String,
body: Option<RQ>, body: Option<RQ>,
) -> anyhow::Result<RS> ) -> anyhow::Result<RS>
@@ -472,16 +472,6 @@ impl StorageController {
.await .await
} }
#[instrument(skip(self))]
pub async fn tenant_import(&self, tenant_id: TenantId) -> anyhow::Result<TenantCreateResponse> {
self.dispatch::<(), TenantCreateResponse>(
Method::POST,
format!("debug/v1/tenant/{tenant_id}/import"),
None,
)
.await
}
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn tenant_locate(&self, tenant_id: TenantId) -> anyhow::Result<TenantLocateResponse> { pub async fn tenant_locate(&self, tenant_id: TenantId) -> anyhow::Result<TenantLocateResponse> {
self.dispatch::<(), _>( self.dispatch::<(), _>(

View File

@@ -1,23 +0,0 @@
[package]
name = "storcon_cli"
version = "0.1.0"
edition.workspace = true
license.workspace = true
[dependencies]
anyhow.workspace = true
clap.workspace = true
comfy-table.workspace = true
hyper.workspace = true
pageserver_api.workspace = true
pageserver_client.workspace = true
reqwest.workspace = true
serde.workspace = true
serde_json = { workspace = true, features = ["raw_value"] }
thiserror.workspace = true
tokio.workspace = true
tracing.workspace = true
utils.workspace = true
workspace_hack.workspace = true

View File

@@ -1,680 +0,0 @@
use std::{collections::HashMap, str::FromStr, time::Duration};
use clap::{Parser, Subcommand};
use pageserver_api::{
controller_api::{
NodeAvailabilityWrapper, NodeDescribeResponse, ShardSchedulingPolicy,
TenantDescribeResponse, TenantPolicyRequest,
},
models::{
LocationConfigSecondary, ShardParameters, TenantConfig, TenantConfigRequest,
TenantCreateRequest, TenantShardSplitRequest, TenantShardSplitResponse,
},
shard::{ShardStripeSize, TenantShardId},
};
use pageserver_client::mgmt_api::{self, ResponseErrorMessageExt};
use reqwest::{Method, StatusCode, Url};
use serde::{de::DeserializeOwned, Serialize};
use utils::id::{NodeId, TenantId};
use pageserver_api::controller_api::{
NodeConfigureRequest, NodeRegisterRequest, NodeSchedulingPolicy, PlacementPolicy,
TenantLocateResponse, TenantShardMigrateRequest, TenantShardMigrateResponse,
};
#[derive(Subcommand, Debug)]
enum Command {
/// Register a pageserver with the storage controller. This shouldn't usually be necessary,
/// since pageservers auto-register when they start up
NodeRegister {
#[arg(long)]
node_id: NodeId,
#[arg(long)]
listen_pg_addr: String,
#[arg(long)]
listen_pg_port: u16,
#[arg(long)]
listen_http_addr: String,
#[arg(long)]
listen_http_port: u16,
},
/// Modify a node's configuration in the storage controller
NodeConfigure {
#[arg(long)]
node_id: NodeId,
/// Availability is usually auto-detected based on heartbeats. Set 'offline' here to
/// manually mark a node offline
#[arg(long)]
availability: Option<NodeAvailabilityArg>,
/// Scheduling policy controls whether tenant shards may be scheduled onto this node.
#[arg(long)]
scheduling: Option<NodeSchedulingPolicy>,
},
/// Modify a tenant's policies in the storage controller
TenantPolicy {
#[arg(long)]
tenant_id: TenantId,
/// Placement policy controls whether a tenant is `detached`, has only a secondary location (`secondary`),
/// or is in the normal attached state with N secondary locations (`attached:N`)
#[arg(long)]
placement: Option<PlacementPolicyArg>,
/// Scheduling policy enables pausing the controller's scheduling activity involving this tenant. `active` is normal,
/// `essential` disables optimization scheduling changes, `pause` disables all scheduling changes, and `stop` prevents
/// all reconciliation activity including for scheduling changes already made. `pause` and `stop` can make a tenant
/// unavailable, and are only for use in emergencies.
#[arg(long)]
scheduling: Option<ShardSchedulingPolicyArg>,
},
/// List nodes known to the storage controller
Nodes {},
/// List tenants known to the storage controller
Tenants {},
/// Create a new tenant in the storage controller, and by extension on pageservers.
TenantCreate {
#[arg(long)]
tenant_id: TenantId,
},
/// Delete a tenant in the storage controller, and by extension on pageservers.
TenantDelete {
#[arg(long)]
tenant_id: TenantId,
},
/// Split an existing tenant into a higher number of shards than its current shard count.
TenantShardSplit {
#[arg(long)]
tenant_id: TenantId,
#[arg(long)]
shard_count: u8,
/// Optional, in 8kiB pages. e.g. set 2048 for 16MB stripes.
#[arg(long)]
stripe_size: Option<u32>,
},
/// Migrate the attached location for a tenant shard to a specific pageserver.
TenantShardMigrate {
#[arg(long)]
tenant_shard_id: TenantShardId,
#[arg(long)]
node: NodeId,
},
/// Modify the pageserver tenant configuration of a tenant: this is the configuration structure
/// that is passed through to pageservers, and does not affect storage controller behavior.
TenantConfig {
#[arg(long)]
tenant_id: TenantId,
#[arg(long)]
config: String,
},
/// Attempt to balance the locations for a tenant across pageservers. This is a client-side
/// alternative to the storage controller's scheduling optimization behavior.
TenantScatter {
#[arg(long)]
tenant_id: TenantId,
},
/// Print details about a particular tenant, including all its shards' states.
TenantDescribe {
#[arg(long)]
tenant_id: TenantId,
},
/// For a tenant which hasn't been onboarded to the storage controller yet, add it in secondary
/// mode so that it can warm up content on a pageserver.
TenantWarmup {
#[arg(long)]
tenant_id: TenantId,
},
}
#[derive(Parser)]
#[command(
author,
version,
about,
long_about = "CLI for Storage Controller Support/Debug"
)]
#[command(arg_required_else_help(true))]
struct Cli {
#[arg(long)]
/// URL to storage controller. e.g. http://127.0.0.1:1234 when using `neon_local`
api: Url,
#[arg(long)]
/// JWT token for authenticating with storage controller. Depending on the API used, this
/// should have either `pageserverapi` or `admin` scopes: for convenience, you should mint
/// a token with both scopes to use with this tool.
jwt: Option<String>,
#[command(subcommand)]
command: Command,
}
#[derive(Debug, Clone)]
struct PlacementPolicyArg(PlacementPolicy);
impl FromStr for PlacementPolicyArg {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"detached" => Ok(Self(PlacementPolicy::Detached)),
"secondary" => Ok(Self(PlacementPolicy::Secondary)),
_ if s.starts_with("attached:") => {
let mut splitter = s.split(':');
let _prefix = splitter.next().unwrap();
match splitter.next().and_then(|s| s.parse::<usize>().ok()) {
Some(n) => Ok(Self(PlacementPolicy::Attached(n))),
None => Err(anyhow::anyhow!(
"Invalid format '{s}', a valid example is 'attached:1'"
)),
}
}
_ => Err(anyhow::anyhow!(
"Unknown placement policy '{s}', try detached,secondary,attached:<n>"
)),
}
}
}
#[derive(Debug, Clone)]
struct ShardSchedulingPolicyArg(ShardSchedulingPolicy);
impl FromStr for ShardSchedulingPolicyArg {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"active" => Ok(Self(ShardSchedulingPolicy::Active)),
"essential" => Ok(Self(ShardSchedulingPolicy::Essential)),
"pause" => Ok(Self(ShardSchedulingPolicy::Pause)),
"stop" => Ok(Self(ShardSchedulingPolicy::Stop)),
_ => Err(anyhow::anyhow!(
"Unknown scheduling policy '{s}', try active,essential,pause,stop"
)),
}
}
}
#[derive(Debug, Clone)]
struct NodeAvailabilityArg(NodeAvailabilityWrapper);
impl FromStr for NodeAvailabilityArg {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"active" => Ok(Self(NodeAvailabilityWrapper::Active)),
"offline" => Ok(Self(NodeAvailabilityWrapper::Offline)),
_ => Err(anyhow::anyhow!("Unknown availability state '{s}'")),
}
}
}
struct Client {
base_url: Url,
jwt_token: Option<String>,
client: reqwest::Client,
}
impl Client {
fn new(base_url: Url, jwt_token: Option<String>) -> Self {
Self {
base_url,
jwt_token,
client: reqwest::ClientBuilder::new()
.build()
.expect("Failed to construct http client"),
}
}
/// Simple HTTP request wrapper for calling into storage controller
async fn dispatch<RQ, RS>(
&self,
method: Method,
path: String,
body: Option<RQ>,
) -> mgmt_api::Result<RS>
where
RQ: Serialize + Sized,
RS: DeserializeOwned + Sized,
{
// The configured URL has the /upcall path prefix for pageservers to use: we will strip that out
// for general purpose API access.
let url = Url::from_str(&format!(
"http://{}:{}/{path}",
self.base_url.host_str().unwrap(),
self.base_url.port().unwrap()
))
.unwrap();
let mut builder = self.client.request(method, url);
if let Some(body) = body {
builder = builder.json(&body)
}
if let Some(jwt_token) = &self.jwt_token {
builder = builder.header(
reqwest::header::AUTHORIZATION,
format!("Bearer {jwt_token}"),
);
}
let response = builder.send().await.map_err(mgmt_api::Error::ReceiveBody)?;
let response = response.error_from_body().await?;
response
.json()
.await
.map_err(pageserver_client::mgmt_api::Error::ReceiveBody)
}
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let cli = Cli::parse();
let storcon_client = Client::new(cli.api.clone(), cli.jwt.clone());
let mut trimmed = cli.api.to_string();
trimmed.pop();
let vps_client = mgmt_api::Client::new(trimmed, cli.jwt.as_deref());
match cli.command {
Command::NodeRegister {
node_id,
listen_pg_addr,
listen_pg_port,
listen_http_addr,
listen_http_port,
} => {
storcon_client
.dispatch::<_, ()>(
Method::POST,
"control/v1/node".to_string(),
Some(NodeRegisterRequest {
node_id,
listen_pg_addr,
listen_pg_port,
listen_http_addr,
listen_http_port,
}),
)
.await?;
}
Command::TenantCreate { tenant_id } => {
vps_client
.tenant_create(&TenantCreateRequest {
new_tenant_id: TenantShardId::unsharded(tenant_id),
generation: None,
shard_parameters: ShardParameters::default(),
placement_policy: Some(PlacementPolicy::Attached(1)),
config: TenantConfig::default(),
})
.await?;
}
Command::TenantDelete { tenant_id } => {
let status = vps_client
.tenant_delete(TenantShardId::unsharded(tenant_id))
.await?;
tracing::info!("Delete status: {}", status);
}
Command::Nodes {} => {
let resp = storcon_client
.dispatch::<(), Vec<NodeDescribeResponse>>(
Method::GET,
"control/v1/node".to_string(),
None,
)
.await?;
let mut table = comfy_table::Table::new();
table.set_header(["Id", "Hostname", "Scheduling", "Availability"]);
for node in resp {
table.add_row([
format!("{}", node.id),
node.listen_http_addr,
format!("{:?}", node.scheduling),
format!("{:?}", node.availability),
]);
}
println!("{table}");
}
Command::NodeConfigure {
node_id,
availability,
scheduling,
} => {
let req = NodeConfigureRequest {
node_id,
availability: availability.map(|a| a.0),
scheduling,
};
storcon_client
.dispatch::<_, ()>(
Method::PUT,
format!("control/v1/node/{node_id}/config"),
Some(req),
)
.await?;
}
Command::Tenants {} => {
let resp = storcon_client
.dispatch::<(), Vec<TenantDescribeResponse>>(
Method::GET,
"control/v1/tenant".to_string(),
None,
)
.await?;
let mut table = comfy_table::Table::new();
table.set_header([
"TenantId",
"ShardCount",
"StripeSize",
"Placement",
"Scheduling",
]);
for tenant in resp {
let shard_zero = tenant.shards.into_iter().next().unwrap();
table.add_row([
format!("{}", tenant.tenant_id),
format!("{}", shard_zero.tenant_shard_id.shard_count.literal()),
format!("{:?}", tenant.stripe_size),
format!("{:?}", tenant.policy),
format!("{:?}", shard_zero.scheduling_policy),
]);
}
println!("{table}");
}
Command::TenantPolicy {
tenant_id,
placement,
scheduling,
} => {
let req = TenantPolicyRequest {
scheduling: scheduling.map(|s| s.0),
placement: placement.map(|p| p.0),
};
storcon_client
.dispatch::<_, ()>(
Method::PUT,
format!("control/v1/tenant/{tenant_id}/policy"),
Some(req),
)
.await?;
}
Command::TenantShardSplit {
tenant_id,
shard_count,
stripe_size,
} => {
let req = TenantShardSplitRequest {
new_shard_count: shard_count,
new_stripe_size: stripe_size.map(ShardStripeSize),
};
let response = storcon_client
.dispatch::<TenantShardSplitRequest, TenantShardSplitResponse>(
Method::PUT,
format!("control/v1/tenant/{tenant_id}/shard_split"),
Some(req),
)
.await?;
println!(
"Split tenant {} into {} shards: {}",
tenant_id,
shard_count,
response
.new_shards
.iter()
.map(|s| format!("{:?}", s))
.collect::<Vec<_>>()
.join(",")
);
}
Command::TenantShardMigrate {
tenant_shard_id,
node,
} => {
let req = TenantShardMigrateRequest {
tenant_shard_id,
node_id: node,
};
storcon_client
.dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
Method::PUT,
format!("control/v1/tenant/{tenant_shard_id}/migrate"),
Some(req),
)
.await?;
}
Command::TenantConfig { tenant_id, config } => {
let tenant_conf = serde_json::from_str(&config)?;
vps_client
.tenant_config(&TenantConfigRequest {
tenant_id,
config: tenant_conf,
})
.await?;
}
Command::TenantScatter { tenant_id } => {
// Find the shards
let locate_response = storcon_client
.dispatch::<(), TenantLocateResponse>(
Method::GET,
format!("control/v1/tenant/{tenant_id}/locate"),
None,
)
.await?;
let shards = locate_response.shards;
let mut node_to_shards: HashMap<NodeId, Vec<TenantShardId>> = HashMap::new();
let shard_count = shards.len();
for s in shards {
let entry = node_to_shards.entry(s.node_id).or_default();
entry.push(s.shard_id);
}
// Load list of available nodes
let nodes_resp = storcon_client
.dispatch::<(), Vec<NodeDescribeResponse>>(
Method::GET,
"control/v1/node".to_string(),
None,
)
.await?;
for node in nodes_resp {
if matches!(node.availability, NodeAvailabilityWrapper::Active) {
node_to_shards.entry(node.id).or_default();
}
}
let max_shard_per_node = shard_count / node_to_shards.len();
loop {
let mut migrate_shard = None;
for shards in node_to_shards.values_mut() {
if shards.len() > max_shard_per_node {
// Pick the emptiest
migrate_shard = Some(shards.pop().unwrap());
}
}
let Some(migrate_shard) = migrate_shard else {
break;
};
// Pick the emptiest node to migrate to
let mut destinations = node_to_shards
.iter()
.map(|(k, v)| (k, v.len()))
.collect::<Vec<_>>();
destinations.sort_by_key(|i| i.1);
let (destination_node, destination_count) = *destinations.first().unwrap();
if destination_count + 1 > max_shard_per_node {
// Even the emptiest destination doesn't have space: we're done
break;
}
let destination_node = *destination_node;
node_to_shards
.get_mut(&destination_node)
.unwrap()
.push(migrate_shard);
println!("Migrate {} -> {} ...", migrate_shard, destination_node);
storcon_client
.dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
Method::PUT,
format!("control/v1/tenant/{migrate_shard}/migrate"),
Some(TenantShardMigrateRequest {
tenant_shard_id: migrate_shard,
node_id: destination_node,
}),
)
.await?;
println!("Migrate {} -> {} OK", migrate_shard, destination_node);
}
// Spread the shards across the nodes
}
Command::TenantDescribe { tenant_id } => {
let describe_response = storcon_client
.dispatch::<(), TenantDescribeResponse>(
Method::GET,
format!("control/v1/tenant/{tenant_id}"),
None,
)
.await?;
let shards = describe_response.shards;
let mut table = comfy_table::Table::new();
table.set_header(["Shard", "Attached", "Secondary", "Last error", "status"]);
for shard in shards {
let secondary = shard
.node_secondary
.iter()
.map(|n| format!("{}", n))
.collect::<Vec<_>>()
.join(",");
let mut status_parts = Vec::new();
if shard.is_reconciling {
status_parts.push("reconciling");
}
if shard.is_pending_compute_notification {
status_parts.push("pending_compute");
}
if shard.is_splitting {
status_parts.push("splitting");
}
let status = status_parts.join(",");
table.add_row([
format!("{}", shard.tenant_shard_id),
shard
.node_attached
.map(|n| format!("{}", n))
.unwrap_or(String::new()),
secondary,
shard.last_error,
status,
]);
}
println!("{table}");
}
Command::TenantWarmup { tenant_id } => {
let describe_response = storcon_client
.dispatch::<(), TenantDescribeResponse>(
Method::GET,
format!("control/v1/tenant/{tenant_id}"),
None,
)
.await;
match describe_response {
Ok(describe) => {
if matches!(describe.policy, PlacementPolicy::Secondary) {
// Fine: it's already known to controller in secondary mode: calling
// again to put it into secondary mode won't cause problems.
} else {
anyhow::bail!("Tenant already present with policy {:?}", describe.policy);
}
}
Err(mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _)) => {
// Fine: this tenant isn't know to the storage controller yet.
}
Err(e) => {
// Unexpected API error
return Err(e.into());
}
}
vps_client
.location_config(
TenantShardId::unsharded(tenant_id),
pageserver_api::models::LocationConfig {
mode: pageserver_api::models::LocationConfigMode::Secondary,
generation: None,
secondary_conf: Some(LocationConfigSecondary { warm: true }),
shard_number: 0,
shard_count: 0,
shard_stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE.0,
tenant_conf: TenantConfig::default(),
},
None,
true,
)
.await?;
let describe_response = storcon_client
.dispatch::<(), TenantDescribeResponse>(
Method::GET,
format!("control/v1/tenant/{tenant_id}"),
None,
)
.await?;
let secondary_ps_id = describe_response
.shards
.first()
.unwrap()
.node_secondary
.first()
.unwrap();
println!("Tenant {tenant_id} warming up on pageserver {secondary_ps_id}");
loop {
let (status, progress) = vps_client
.tenant_secondary_download(
TenantShardId::unsharded(tenant_id),
Some(Duration::from_secs(10)),
)
.await?;
println!(
"Progress: {}/{} layers, {}/{} bytes",
progress.layers_downloaded,
progress.layers_total,
progress.bytes_downloaded,
progress.bytes_total
);
match status {
StatusCode::OK => {
println!("Download complete");
break;
}
StatusCode::ACCEPTED => {
// Loop
}
_ => {
anyhow::bail!("Unexpected download status: {status}");
}
}
}
}
}
Ok(())
}

View File

@@ -2,8 +2,8 @@
# see https://diesel.rs/guides/configuring-diesel-cli # see https://diesel.rs/guides/configuring-diesel-cli
[print_schema] [print_schema]
file = "storage_controller/src/schema.rs" file = "control_plane/attachment_service/src/schema.rs"
custom_type_derives = ["diesel::query_builder::QueryId"] custom_type_derives = ["diesel::query_builder::QueryId"]
[migrations_directory] [migrations_directory]
dir = "storage_controller/migrations" dir = "control_plane/attachment_service/migrations"

View File

@@ -7,11 +7,6 @@ Below you will find a brief overview of each subdir in the source tree in alphab
Neon storage broker, providing messaging between safekeepers and pageservers. Neon storage broker, providing messaging between safekeepers and pageservers.
[storage_broker.md](./storage_broker.md) [storage_broker.md](./storage_broker.md)
`storage_controller`:
Neon storage controller, manages a cluster of pageservers and exposes an API that enables
managing a many-sharded tenant as a single entity.
`/control_plane`: `/control_plane`:
Local control plane. Local control plane.

View File

@@ -1,150 +0,0 @@
# Storage Controller
## Concepts
The storage controller sits between administrative API clients and pageservers, and handles the details of mapping tenants to pageserver tenant shards. For example, creating a tenant is one API call to the storage controller,
which is mapped into many API calls to many pageservers (for multiple shards, and for secondary locations).
It implements a pageserver-compatible API that may be used for CRUD operations on tenants and timelines, translating these requests into appropriate operations on the shards within a tenant, which may be on many different pageservers. Using this API, the storage controller may be used in the same way as the pageserver's administrative HTTP API, hiding
the underlying details of how data is spread across multiple nodes.
The storage controller also manages generations, high availability (via secondary locations) and live migrations for tenants under its management. This is done with a reconciliation loop pattern, where tenants have an “intent” state and a “reconcile” task that tries to make the outside world match the intent.
## APIs
The storage controllers HTTP server implements four logically separate APIs:
- `/v1/...` path is the pageserver-compatible API. This has to be at the path root because thats where clients expect to find it on a pageserver.
- `/control/v1/...` path is the storage controllers API, which enables operations such as registering and management pageservers, or executing shard splits.
- `/debug/v1/...` path contains endpoints which are either exclusively used in tests, or are for use by engineers when supporting a deployed system.
- `/upcall/v1/...` path contains endpoints that are called by pageservers. This includes the `/re-attach` and `/validate` APIs used by pageservers
to ensure data safety with generation numbers.
The API is authenticated with a JWT token, and tokens must have scope `pageserverapi` (i.e. the same scope as pageservers APIs).
See the `http.rs` file in the source for where the HTTP APIs are implemented.
## Database
The storage controller uses a postgres database to persist a subset of its state. Note that the storage controller does _not_ keep all its state in the database: this is a design choice to enable most operations to be done efficiently in memory, rather than having to read from the database. See `persistence.rs` for a more comprehensive comment explaining what we do and do not persist: a useful metaphor is that we persist objects like tenants and nodes, but we do not
persist the _relationships_ between them: the attachment state of a tenant's shards to nodes is kept in memory and
rebuilt on startup.
The file `persistence.rs` contains all the code for accessing the database, and has a large doc comment that goes into more detail about exactly what we persist and why.
The `diesel` crate is used for defining models & migrations.
Running a local cluster with `cargo neon` automatically starts a vanilla postgress process to host the storage controllers database.
### Diesel tip: migrations
If you need to modify the database schema, heres how to create a migration:
- Install the diesel CLI with `cargo install diesel_cli`
- Use `diesel migration generate <name>` to create a new migration
- Populate the SQL files in the `migrations/` subdirectory
- Use `DATABASE_URL=... diesel migration run` to apply the migration you just wrote: this will update the `[schema.rs](http://schema.rs)` file automatically.
- This requires a running database: the easiest way to do that is to just run `cargo neon init ; cargo neon start`, which will leave a database available at `postgresql://localhost:1235/attachment_service`
- Commit the migration files and the changes to schema.rs
- If you need to iterate, you can rewind migrations with `diesel migration revert -a` and then `diesel migration run` again.
- The migrations are build into the storage controller binary, and automatically run at startup after it is deployed, so once youve committed a migration no further steps are needed.
## storcon_cli
The `storcon_cli` tool enables interactive management of the storage controller. This is usually
only necessary for debug, but may also be used to manage nodes (e.g. marking a node as offline).
`storcon_cli --help` includes details on commands.
# Deploying
This section is aimed at engineers deploying the storage controller outside of Neon's cloud platform, as
part of a self-hosted system.
_General note: since the default `neon_local` environment includes a storage controller, this is a useful
reference when figuring out deployment._
## Database
It is **essential** that the database used by the storage controller is durable (**do not store it on ephemeral
local disk**). This database contains pageserver generation numbers, which are essential to data safety on the pageserver.
The resource requirements for the database are very low: a single CPU core and 1GiB of memory should work well for most deployments. The physical size of the database is typically under a gigabyte.
Set the URL to the database using the `--database-url` CLI option.
There is no need to run migrations manually: the storage controller automatically applies migrations
when it starts up.
## Configure pageservers to use the storage controller
1. The pageserver `control_plane_api` and `control_plane_api_token` should be set in the `pageserver.toml` file. The API setting should
point to the "upcall" prefix, for example `http://127.0.0.1:1234/upcall/v1/` is used in neon_local clusters.
2. Create a `metadata.json` file in the same directory as `pageserver.toml`: this enables the pageserver to automatically register itself
with the storage controller when it starts up. See the example below for the format of this file.
### Example `metadata.json`
```
{"host":"acmehost.localdomain","http_host":"acmehost.localdomain","http_port":9898,"port":64000}
```
- `port` and `host` refer to the _postgres_ port and host, and these must be accessible from wherever
postgres runs.
- `http_port` and `http_host` refer to the pageserver's HTTP api, this must be accessible from where
the storage controller runs.
## Handle compute notifications.
The storage controller independently moves tenant attachments between pageservers in response to
changes such as a pageserver node becoming unavailable, or the tenant's shard count changing. To enable
postgres clients to handle such changes, the storage controller calls an API hook when a tenant's pageserver
location changes.
The hook is configured using the storage controller's `--compute-hook-url` CLI option. If the hook requires
JWT auth, the token may be provided with `--control-plane-jwt-token`. The hook will be invoked with a `PUT` request.
In the Neon cloud service, this hook is implemented by Neon's internal cloud control plane. In `neon_local` systems
the storage controller integrates directly with neon_local to reconfigure local postgres processes instead of calling
the compute hook.
When implementing an on-premise Neon deployment, you must implement a service that handles the compute hook. This is not complicated:
the request body has format of the `ComputeHookNotifyRequest` structure, provided below for convenience.
```
struct ComputeHookNotifyRequestShard {
node_id: NodeId,
shard_number: ShardNumber,
}
struct ComputeHookNotifyRequest {
tenant_id: TenantId,
stripe_size: Option<ShardStripeSize>,
shards: Vec<ComputeHookNotifyRequestShard>,
}
```
When a notification is received:
1. Modify postgres configuration for this tenant:
- set `neon.pageserver_connstr` to a comma-separated list of postgres connection strings to pageservers according to the `shards` list. The
shards identified by `NodeId` must be converted to the address+port of the node.
- if stripe_size is not None, set `neon.stripe_size` to this value
2. Send SIGHUP to postgres to reload configuration
3. Respond with 200 to the notification request. Do not return success if postgres was not updated: if an error is returned, the controller
will retry the notification until it succeeds..
### Example notification body
```
{
"tenant_id": "1f359dd625e519a1a4e8d7509690f6fc",
"stripe_size": 32768,
"shards": [
{"node_id": 344, "shard_number": 0},
{"node_id": 722, "shard_number": 1},
],
}
```

View File

@@ -10,13 +10,11 @@ libc.workspace = true
once_cell.workspace = true once_cell.workspace = true
chrono.workspace = true chrono.workspace = true
twox-hash.workspace = true twox-hash.workspace = true
measured.workspace = true
workspace_hack.workspace = true workspace_hack.workspace = true
[target.'cfg(target_os = "linux")'.dependencies] [target.'cfg(target_os = "linux")'.dependencies]
procfs.workspace = true procfs.workspace = true
measured-process.workspace = true
[dev-dependencies] [dev-dependencies]
rand = "0.8" rand = "0.8"

View File

@@ -7,19 +7,14 @@
//! use significantly less memory than this, but can only approximate the cardinality. //! use significantly less memory than this, but can only approximate the cardinality.
use std::{ use std::{
hash::{BuildHasher, BuildHasherDefault, Hash}, collections::HashMap,
sync::atomic::AtomicU8, hash::{BuildHasher, BuildHasherDefault, Hash, Hasher},
sync::{atomic::AtomicU8, Arc, RwLock},
}; };
use measured::{ use prometheus::{
label::{LabelGroupVisitor, LabelName, LabelValue, LabelVisitor}, core::{self, Describer},
metric::{ proto, Opts,
group::{Encoding, MetricValue},
name::MetricNameEncoder,
Metric, MetricType, MetricVec,
},
text::TextEncoder,
LabelGroup,
}; };
use twox_hash::xxh3; use twox_hash::xxh3;
@@ -98,25 +93,203 @@ macro_rules! register_hll {
/// ``` /// ```
/// ///
/// See <https://en.wikipedia.org/wiki/HyperLogLog#Practical_considerations> for estimates on alpha /// See <https://en.wikipedia.org/wiki/HyperLogLog#Practical_considerations> for estimates on alpha
pub type HyperLogLogVec<L, const N: usize> = MetricVec<HyperLogLogState<N>, L>; #[derive(Clone)]
pub type HyperLogLog<const N: usize> = Metric<HyperLogLogState<N>>; pub struct HyperLogLogVec<const N: usize> {
core: Arc<HyperLogLogVecCore<N>>,
pub struct HyperLogLogState<const N: usize> {
shards: [AtomicU8; N],
} }
impl<const N: usize> Default for HyperLogLogState<N> {
fn default() -> Self { struct HyperLogLogVecCore<const N: usize> {
#[allow(clippy::declare_interior_mutable_const)] pub children: RwLock<HashMap<u64, HyperLogLog<N>, BuildHasherDefault<xxh3::Hash64>>>,
const ZERO: AtomicU8 = AtomicU8::new(0); pub desc: core::Desc,
Self { shards: [ZERO; N] } pub opts: Opts,
}
impl<const N: usize> core::Collector for HyperLogLogVec<N> {
fn desc(&self) -> Vec<&core::Desc> {
vec![&self.core.desc]
}
fn collect(&self) -> Vec<proto::MetricFamily> {
let mut m = proto::MetricFamily::default();
m.set_name(self.core.desc.fq_name.clone());
m.set_help(self.core.desc.help.clone());
m.set_field_type(proto::MetricType::GAUGE);
let mut metrics = Vec::new();
for child in self.core.children.read().unwrap().values() {
child.core.collect_into(&mut metrics);
}
m.set_metric(metrics);
vec![m]
} }
} }
impl<const N: usize> MetricType for HyperLogLogState<N> { impl<const N: usize> HyperLogLogVec<N> {
type Metadata = (); /// Create a new [`HyperLogLogVec`] based on the provided
/// [`Opts`] and partitioned by the given label names. At least one label name must be
/// provided.
pub fn new(opts: Opts, label_names: &[&str]) -> prometheus::Result<Self> {
assert!(N.is_power_of_two());
let variable_names = label_names.iter().map(|s| (*s).to_owned()).collect();
let opts = opts.variable_labels(variable_names);
let desc = opts.describe()?;
let v = HyperLogLogVecCore {
children: RwLock::new(HashMap::default()),
desc,
opts,
};
Ok(Self { core: Arc::new(v) })
}
/// `get_metric_with_label_values` returns the [`HyperLogLog<P>`] for the given slice
/// of label values (same order as the VariableLabels in Desc). If that combination of
/// label values is accessed for the first time, a new [`HyperLogLog<P>`] is created.
///
/// An error is returned if the number of label values is not the same as the
/// number of VariableLabels in Desc.
pub fn get_metric_with_label_values(
&self,
vals: &[&str],
) -> prometheus::Result<HyperLogLog<N>> {
self.core.get_metric_with_label_values(vals)
}
/// `with_label_values` works as `get_metric_with_label_values`, but panics if an error
/// occurs.
pub fn with_label_values(&self, vals: &[&str]) -> HyperLogLog<N> {
self.get_metric_with_label_values(vals).unwrap()
}
} }
impl<const N: usize> HyperLogLogState<N> { impl<const N: usize> HyperLogLogVecCore<N> {
pub fn get_metric_with_label_values(
&self,
vals: &[&str],
) -> prometheus::Result<HyperLogLog<N>> {
let h = self.hash_label_values(vals)?;
if let Some(metric) = self.children.read().unwrap().get(&h).cloned() {
return Ok(metric);
}
self.get_or_create_metric(h, vals)
}
pub(crate) fn hash_label_values(&self, vals: &[&str]) -> prometheus::Result<u64> {
if vals.len() != self.desc.variable_labels.len() {
return Err(prometheus::Error::InconsistentCardinality {
expect: self.desc.variable_labels.len(),
got: vals.len(),
});
}
let mut h = xxh3::Hash64::default();
for val in vals {
h.write(val.as_bytes());
}
Ok(h.finish())
}
fn get_or_create_metric(
&self,
hash: u64,
label_values: &[&str],
) -> prometheus::Result<HyperLogLog<N>> {
let mut children = self.children.write().unwrap();
// Check exist first.
if let Some(metric) = children.get(&hash).cloned() {
return Ok(metric);
}
let metric = HyperLogLog::with_opts_and_label_values(&self.opts, label_values)?;
children.insert(hash, metric.clone());
Ok(metric)
}
}
/// HLL is a probabilistic cardinality measure.
///
/// How to use this time-series for a metric name `my_metrics_total_hll`:
///
/// ```promql
/// # harmonic mean
/// 1 / (
/// sum (
/// 2 ^ -(
/// # HLL merge operation
/// max (my_metrics_total_hll{}) by (hll_shard, other_labels...)
/// )
/// ) without (hll_shard)
/// )
/// * alpha
/// * shards_count
/// * shards_count
/// ```
///
/// If you want an estimate over time, you can use the following query:
///
/// ```promql
/// # harmonic mean
/// 1 / (
/// sum (
/// 2 ^ -(
/// # HLL merge operation
/// max (
/// max_over_time(my_metrics_total_hll{}[$__rate_interval])
/// ) by (hll_shard, other_labels...)
/// )
/// ) without (hll_shard)
/// )
/// * alpha
/// * shards_count
/// * shards_count
/// ```
///
/// In the case of low cardinality, you might want to use the linear counting approximation:
///
/// ```promql
/// # LinearCounting(m, V) = m log (m / V)
/// shards_count * ln(shards_count /
/// # calculate V = how many shards contain a 0
/// count(max (proxy_connecting_endpoints{}) by (hll_shard, protocol) == 0) without (hll_shard)
/// )
/// ```
///
/// See <https://en.wikipedia.org/wiki/HyperLogLog#Practical_considerations> for estimates on alpha
#[derive(Clone)]
pub struct HyperLogLog<const N: usize> {
core: Arc<HyperLogLogCore<N>>,
}
impl<const N: usize> HyperLogLog<N> {
/// Create a [`HyperLogLog`] with the `name` and `help` arguments.
pub fn new<S1: Into<String>, S2: Into<String>>(name: S1, help: S2) -> prometheus::Result<Self> {
assert!(N.is_power_of_two());
let opts = Opts::new(name, help);
Self::with_opts(opts)
}
/// Create a [`HyperLogLog`] with the `opts` options.
pub fn with_opts(opts: Opts) -> prometheus::Result<Self> {
Self::with_opts_and_label_values(&opts, &[])
}
fn with_opts_and_label_values(opts: &Opts, label_values: &[&str]) -> prometheus::Result<Self> {
let desc = opts.describe()?;
let labels = make_label_pairs(&desc, label_values)?;
let v = HyperLogLogCore {
shards: [0; N].map(AtomicU8::new),
desc,
labels,
};
Ok(Self { core: Arc::new(v) })
}
pub fn measure(&self, item: &impl Hash) { pub fn measure(&self, item: &impl Hash) {
// changing the hasher will break compatibility with previous measurements. // changing the hasher will break compatibility with previous measurements.
self.record(BuildHasherDefault::<xxh3::Hash64>::default().hash_one(item)); self.record(BuildHasherDefault::<xxh3::Hash64>::default().hash_one(item));
@@ -126,11 +299,42 @@ impl<const N: usize> HyperLogLogState<N> {
let p = N.ilog2() as u8; let p = N.ilog2() as u8;
let j = hash & (N as u64 - 1); let j = hash & (N as u64 - 1);
let rho = (hash >> p).leading_zeros() as u8 + 1 - p; let rho = (hash >> p).leading_zeros() as u8 + 1 - p;
self.shards[j as usize].fetch_max(rho, std::sync::atomic::Ordering::Relaxed); self.core.shards[j as usize].fetch_max(rho, std::sync::atomic::Ordering::Relaxed);
}
}
struct HyperLogLogCore<const N: usize> {
shards: [AtomicU8; N],
desc: core::Desc,
labels: Vec<proto::LabelPair>,
}
impl<const N: usize> core::Collector for HyperLogLog<N> {
fn desc(&self) -> Vec<&core::Desc> {
vec![&self.core.desc]
} }
fn take_sample(&self) -> [u8; N] { fn collect(&self) -> Vec<proto::MetricFamily> {
self.shards.each_ref().map(|x| { let mut m = proto::MetricFamily::default();
m.set_name(self.core.desc.fq_name.clone());
m.set_help(self.core.desc.help.clone());
m.set_field_type(proto::MetricType::GAUGE);
let mut metrics = Vec::new();
self.core.collect_into(&mut metrics);
m.set_metric(metrics);
vec![m]
}
}
impl<const N: usize> HyperLogLogCore<N> {
fn collect_into(&self, metrics: &mut Vec<proto::Metric>) {
self.shards.iter().enumerate().for_each(|(i, x)| {
let mut shard_label = proto::LabelPair::default();
shard_label.set_name("hll_shard".to_owned());
shard_label.set_value(format!("{i}"));
// We reset the counter to 0 so we can perform a cardinality measure over any time slice in prometheus. // We reset the counter to 0 so we can perform a cardinality measure over any time slice in prometheus.
// This seems like it would be a race condition, // This seems like it would be a race condition,
@@ -140,90 +344,85 @@ impl<const N: usize> HyperLogLogState<N> {
// TODO: maybe we shouldn't reset this on every collect, instead, only after a time window. // TODO: maybe we shouldn't reset this on every collect, instead, only after a time window.
// this would mean that a dev port-forwarding the metrics url won't break the sampling. // this would mean that a dev port-forwarding the metrics url won't break the sampling.
x.swap(0, std::sync::atomic::Ordering::Relaxed) let v = x.swap(0, std::sync::atomic::Ordering::Relaxed);
let mut m = proto::Metric::default();
let mut c = proto::Gauge::default();
c.set_value(v as f64);
m.set_gauge(c);
let mut labels = Vec::with_capacity(self.labels.len() + 1);
labels.extend_from_slice(&self.labels);
labels.push(shard_label);
m.set_label(labels);
metrics.push(m);
}) })
} }
} }
impl<W: std::io::Write, const N: usize> measured::metric::MetricEncoding<TextEncoder<W>>
for HyperLogLogState<N> fn make_label_pairs(
{ desc: &core::Desc,
fn write_type( label_values: &[&str],
name: impl MetricNameEncoder, ) -> prometheus::Result<Vec<proto::LabelPair>> {
enc: &mut TextEncoder<W>, if desc.variable_labels.len() != label_values.len() {
) -> Result<(), std::io::Error> { return Err(prometheus::Error::InconsistentCardinality {
enc.write_type(&name, measured::text::MetricType::Gauge) expect: desc.variable_labels.len(),
got: label_values.len(),
});
} }
fn collect_into(
&self,
_: &(),
labels: impl LabelGroup,
name: impl MetricNameEncoder,
enc: &mut TextEncoder<W>,
) -> Result<(), std::io::Error> {
struct I64(i64);
impl LabelValue for I64 {
fn visit<V: LabelVisitor>(&self, v: V) -> V::Output {
v.write_int(self.0)
}
}
struct HllShardLabel { let total_len = desc.variable_labels.len() + desc.const_label_pairs.len();
hll_shard: i64, if total_len == 0 {
} return Ok(vec![]);
impl LabelGroup for HllShardLabel {
fn visit_values(&self, v: &mut impl LabelGroupVisitor) {
const LE: &LabelName = LabelName::from_str("hll_shard");
v.write_value(LE, &I64(self.hll_shard));
}
}
self.take_sample()
.into_iter()
.enumerate()
.try_for_each(|(hll_shard, val)| {
enc.write_metric_value(
name.by_ref(),
labels.by_ref().compose_with(HllShardLabel {
hll_shard: hll_shard as i64,
}),
MetricValue::Int(val as i64),
)
})
} }
if desc.variable_labels.is_empty() {
return Ok(desc.const_label_pairs.clone());
}
let mut label_pairs = Vec::with_capacity(total_len);
for (i, n) in desc.variable_labels.iter().enumerate() {
let mut label_pair = proto::LabelPair::default();
label_pair.set_name(n.clone());
label_pair.set_value(label_values[i].to_owned());
label_pairs.push(label_pair);
}
for label_pair in &desc.const_label_pairs {
label_pairs.push(label_pair.clone());
}
label_pairs.sort();
Ok(label_pairs)
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::collections::HashSet; use std::collections::HashSet;
use measured::{label::StaticLabelSet, FixedCardinalityLabel}; use prometheus::{proto, Opts};
use rand::{rngs::StdRng, Rng, SeedableRng}; use rand::{rngs::StdRng, Rng, SeedableRng};
use rand_distr::{Distribution, Zipf}; use rand_distr::{Distribution, Zipf};
use crate::HyperLogLogVec; use crate::HyperLogLogVec;
#[derive(FixedCardinalityLabel, Clone, Copy)] fn collect(hll: &HyperLogLogVec<32>) -> Vec<proto::Metric> {
#[label(singleton = "x")] let mut metrics = vec![];
enum Label { hll.core
A, .children
B, .read()
.unwrap()
.values()
.for_each(|c| c.core.collect_into(&mut metrics));
metrics
} }
fn get_cardinality(metrics: &[proto::Metric], filter: impl Fn(&proto::Metric) -> bool) -> f64 {
fn collect(hll: &HyperLogLogVec<StaticLabelSet<Label>, 32>) -> ([u8; 32], [u8; 32]) {
// cannot go through the `hll.collect_family_into` interface yet...
// need to see if I can fix the conflicting impls problem in measured.
(
hll.get_metric(hll.with_labels(Label::A)).take_sample(),
hll.get_metric(hll.with_labels(Label::B)).take_sample(),
)
}
fn get_cardinality(samples: &[[u8; 32]]) -> f64 {
let mut buckets = [0.0; 32]; let mut buckets = [0.0; 32];
for &sample in samples { for metric in metrics.chunks_exact(32) {
for (i, m) in sample.into_iter().enumerate() { if filter(&metric[0]) {
buckets[i] = f64::max(buckets[i], m as f64); for (i, m) in metric.iter().enumerate() {
buckets[i] = f64::max(buckets[i], m.get_gauge().get_value());
}
} }
} }
@@ -238,7 +437,7 @@ mod tests {
} }
fn test_cardinality(n: usize, dist: impl Distribution<f64>) -> ([usize; 3], [f64; 3]) { fn test_cardinality(n: usize, dist: impl Distribution<f64>) -> ([usize; 3], [f64; 3]) {
let hll = HyperLogLogVec::<StaticLabelSet<Label>, 32>::new(); let hll = HyperLogLogVec::<32>::new(Opts::new("foo", "bar"), &["x"]).unwrap();
let mut iter = StdRng::seed_from_u64(0x2024_0112).sample_iter(dist); let mut iter = StdRng::seed_from_u64(0x2024_0112).sample_iter(dist);
let mut set_a = HashSet::new(); let mut set_a = HashSet::new();
@@ -246,20 +445,18 @@ mod tests {
for x in iter.by_ref().take(n) { for x in iter.by_ref().take(n) {
set_a.insert(x.to_bits()); set_a.insert(x.to_bits());
hll.get_metric(hll.with_labels(Label::A)) hll.with_label_values(&["a"]).measure(&x.to_bits());
.measure(&x.to_bits());
} }
for x in iter.by_ref().take(n) { for x in iter.by_ref().take(n) {
set_b.insert(x.to_bits()); set_b.insert(x.to_bits());
hll.get_metric(hll.with_labels(Label::B)) hll.with_label_values(&["b"]).measure(&x.to_bits());
.measure(&x.to_bits());
} }
let merge = &set_a | &set_b; let merge = &set_a | &set_b;
let (a, b) = collect(&hll); let metrics = collect(&hll);
let len = get_cardinality(&[a, b]); let len = get_cardinality(&metrics, |_| true);
let len_a = get_cardinality(&[a]); let len_a = get_cardinality(&metrics, |l| l.get_label()[0].get_value() == "a");
let len_b = get_cardinality(&[b]); let len_b = get_cardinality(&metrics, |l| l.get_label()[0].get_value() == "b");
([merge.len(), set_a.len(), set_b.len()], [len, len_a, len_b]) ([merge.len(), set_a.len(), set_b.len()], [len, len_a, len_b])
} }

View File

@@ -4,17 +4,6 @@
//! a default registry. //! a default registry.
#![deny(clippy::undocumented_unsafe_blocks)] #![deny(clippy::undocumented_unsafe_blocks)]
use measured::{
label::{LabelGroupSet, LabelGroupVisitor, LabelName, NoLabels},
metric::{
counter::CounterState,
gauge::GaugeState,
group::{Encoding, MetricValue},
name::{MetricName, MetricNameEncoder},
MetricEncoding, MetricFamilyEncoding,
},
FixedCardinalityLabel, LabelGroup, MetricGroup,
};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use prometheus::core::{ use prometheus::core::{
Atomic, AtomicU64, Collector, GenericCounter, GenericCounterVec, GenericGauge, GenericGaugeVec, Atomic, AtomicU64, Collector, GenericCounter, GenericCounterVec, GenericGauge, GenericGaugeVec,
@@ -22,7 +11,6 @@ use prometheus::core::{
pub use prometheus::opts; pub use prometheus::opts;
pub use prometheus::register; pub use prometheus::register;
pub use prometheus::Error; pub use prometheus::Error;
use prometheus::Registry;
pub use prometheus::{core, default_registry, proto}; pub use prometheus::{core, default_registry, proto};
pub use prometheus::{exponential_buckets, linear_buckets}; pub use prometheus::{exponential_buckets, linear_buckets};
pub use prometheus::{register_counter_vec, Counter, CounterVec}; pub use prometheus::{register_counter_vec, Counter, CounterVec};
@@ -35,12 +23,13 @@ pub use prometheus::{register_int_counter_vec, IntCounterVec};
pub use prometheus::{register_int_gauge, IntGauge}; pub use prometheus::{register_int_gauge, IntGauge};
pub use prometheus::{register_int_gauge_vec, IntGaugeVec}; pub use prometheus::{register_int_gauge_vec, IntGaugeVec};
pub use prometheus::{Encoder, TextEncoder}; pub use prometheus::{Encoder, TextEncoder};
use prometheus::{Registry, Result};
pub mod launch_timestamp; pub mod launch_timestamp;
mod wrappers; mod wrappers;
pub use wrappers::{CountedReader, CountedWriter}; pub use wrappers::{CountedReader, CountedWriter};
mod hll; mod hll;
pub use hll::{HyperLogLog, HyperLogLogState, HyperLogLogVec}; pub use hll::{HyperLogLog, HyperLogLogVec};
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
pub mod more_process_metrics; pub mod more_process_metrics;
@@ -70,7 +59,7 @@ static INTERNAL_REGISTRY: Lazy<Registry> = Lazy::new(Registry::new);
/// Register a collector in the internal registry. MUST be called before the first call to `gather()`. /// Register a collector in the internal registry. MUST be called before the first call to `gather()`.
/// Otherwise, we can have a deadlock in the `gather()` call, trying to register a new collector /// Otherwise, we can have a deadlock in the `gather()` call, trying to register a new collector
/// while holding the lock. /// while holding the lock.
pub fn register_internal(c: Box<dyn Collector>) -> prometheus::Result<()> { pub fn register_internal(c: Box<dyn Collector>) -> Result<()> {
INTERNAL_REGISTRY.register(c) INTERNAL_REGISTRY.register(c)
} }
@@ -107,127 +96,6 @@ pub const DISK_WRITE_SECONDS_BUCKETS: &[f64] = &[
0.000_050, 0.000_100, 0.000_500, 0.001, 0.003, 0.005, 0.01, 0.05, 0.1, 0.3, 0.5, 0.000_050, 0.000_100, 0.000_500, 0.001, 0.003, 0.005, 0.01, 0.05, 0.1, 0.3, 0.5,
]; ];
pub struct BuildInfo {
pub revision: &'static str,
pub build_tag: &'static str,
}
// todo: allow label group without the set
impl LabelGroup for BuildInfo {
fn visit_values(&self, v: &mut impl LabelGroupVisitor) {
const REVISION: &LabelName = LabelName::from_str("revision");
v.write_value(REVISION, &self.revision);
const BUILD_TAG: &LabelName = LabelName::from_str("build_tag");
v.write_value(BUILD_TAG, &self.build_tag);
}
}
impl<T: Encoding> MetricFamilyEncoding<T> for BuildInfo
where
GaugeState: MetricEncoding<T>,
{
fn collect_family_into(
&self,
name: impl measured::metric::name::MetricNameEncoder,
enc: &mut T,
) -> Result<(), T::Err> {
enc.write_help(&name, "Build/version information")?;
GaugeState::write_type(&name, enc)?;
GaugeState {
count: std::sync::atomic::AtomicI64::new(1),
}
.collect_into(&(), self, name, enc)
}
}
#[derive(MetricGroup)]
#[metric(new(build_info: BuildInfo))]
pub struct NeonMetrics {
#[cfg(target_os = "linux")]
#[metric(namespace = "process")]
#[metric(init = measured_process::ProcessCollector::for_self())]
process: measured_process::ProcessCollector,
#[metric(namespace = "libmetrics")]
#[metric(init = LibMetrics::new(build_info))]
libmetrics: LibMetrics,
}
#[derive(MetricGroup)]
#[metric(new(build_info: BuildInfo))]
pub struct LibMetrics {
#[metric(init = build_info)]
build_info: BuildInfo,
#[metric(flatten)]
rusage: Rusage,
serve_count: CollectionCounter,
}
fn write_gauge<Enc: Encoding>(
x: i64,
labels: impl LabelGroup,
name: impl MetricNameEncoder,
enc: &mut Enc,
) -> Result<(), Enc::Err> {
enc.write_metric_value(name, labels, MetricValue::Int(x))
}
#[derive(Default)]
struct Rusage;
#[derive(FixedCardinalityLabel, Clone, Copy)]
#[label(singleton = "io_operation")]
enum IoOp {
Read,
Write,
}
impl<T: Encoding> MetricGroup<T> for Rusage
where
GaugeState: MetricEncoding<T>,
{
fn collect_group_into(&self, enc: &mut T) -> Result<(), T::Err> {
const DISK_IO: &MetricName = MetricName::from_str("disk_io_bytes_total");
const MAXRSS: &MetricName = MetricName::from_str("maxrss_kb");
let ru = get_rusage_stats();
enc.write_help(
DISK_IO,
"Bytes written and read from disk, grouped by the operation (read|write)",
)?;
GaugeState::write_type(DISK_IO, enc)?;
write_gauge(ru.ru_inblock * BYTES_IN_BLOCK, IoOp::Read, DISK_IO, enc)?;
write_gauge(ru.ru_oublock * BYTES_IN_BLOCK, IoOp::Write, DISK_IO, enc)?;
enc.write_help(MAXRSS, "Memory usage (Maximum Resident Set Size)")?;
GaugeState::write_type(MAXRSS, enc)?;
write_gauge(ru.ru_maxrss, IoOp::Read, MAXRSS, enc)?;
Ok(())
}
}
#[derive(Default)]
struct CollectionCounter(CounterState);
impl<T: Encoding> MetricFamilyEncoding<T> for CollectionCounter
where
CounterState: MetricEncoding<T>,
{
fn collect_family_into(
&self,
name: impl measured::metric::name::MetricNameEncoder,
enc: &mut T,
) -> Result<(), T::Err> {
self.0.inc();
enc.write_help(&name, "Number of metric requests made")?;
self.0.collect_into(&(), NoLabels, name, enc)
}
}
pub fn set_build_info_metric(revision: &str, build_tag: &str) { pub fn set_build_info_metric(revision: &str, build_tag: &str) {
let metric = register_int_gauge_vec!( let metric = register_int_gauge_vec!(
"libmetrics_build_info", "libmetrics_build_info",
@@ -237,7 +105,6 @@ pub fn set_build_info_metric(revision: &str, build_tag: &str) {
.expect("Failed to register build info metric"); .expect("Failed to register build info metric");
metric.with_label_values(&[revision, build_tag]).set(1); metric.with_label_values(&[revision, build_tag]).set(1);
} }
const BYTES_IN_BLOCK: i64 = 512;
// Records I/O stats in a "cross-platform" way. // Records I/O stats in a "cross-platform" way.
// Compiles both on macOS and Linux, but current macOS implementation always returns 0 as values for I/O stats. // Compiles both on macOS and Linux, but current macOS implementation always returns 0 as values for I/O stats.
@@ -250,22 +117,14 @@ const BYTES_IN_BLOCK: i64 = 512;
fn update_rusage_metrics() { fn update_rusage_metrics() {
let rusage_stats = get_rusage_stats(); let rusage_stats = get_rusage_stats();
const BYTES_IN_BLOCK: i64 = 512;
DISK_IO_BYTES DISK_IO_BYTES
.with_label_values(&["read"]) .with_label_values(&["read"])
.set(rusage_stats.ru_inblock * BYTES_IN_BLOCK); .set(rusage_stats.ru_inblock * BYTES_IN_BLOCK);
DISK_IO_BYTES DISK_IO_BYTES
.with_label_values(&["write"]) .with_label_values(&["write"])
.set(rusage_stats.ru_oublock * BYTES_IN_BLOCK); .set(rusage_stats.ru_oublock * BYTES_IN_BLOCK);
MAXRSS_KB.set(rusage_stats.ru_maxrss);
// On macOS, the unit of maxrss is bytes; on Linux, it's kilobytes. https://stackoverflow.com/a/59915669
#[cfg(target_os = "macos")]
{
MAXRSS_KB.set(rusage_stats.ru_maxrss / 1024);
}
#[cfg(not(target_os = "macos"))]
{
MAXRSS_KB.set(rusage_stats.ru_maxrss);
}
} }
fn get_rusage_stats() -> libc::rusage { fn get_rusage_stats() -> libc::rusage {
@@ -292,7 +151,6 @@ macro_rules! register_int_counter_pair_vec {
} }
}}; }};
} }
/// Create an [`IntCounterPair`] and registers to default registry. /// Create an [`IntCounterPair`] and registers to default registry.
#[macro_export(local_inner_macros)] #[macro_export(local_inner_macros)]
macro_rules! register_int_counter_pair { macro_rules! register_int_counter_pair {
@@ -330,10 +188,7 @@ impl<P: Atomic> GenericCounterPairVec<P> {
/// ///
/// An error is returned if the number of label values is not the same as the /// An error is returned if the number of label values is not the same as the
/// number of VariableLabels in Desc. /// number of VariableLabels in Desc.
pub fn get_metric_with_label_values( pub fn get_metric_with_label_values(&self, vals: &[&str]) -> Result<GenericCounterPair<P>> {
&self,
vals: &[&str],
) -> prometheus::Result<GenericCounterPair<P>> {
Ok(GenericCounterPair { Ok(GenericCounterPair {
inc: self.inc.get_metric_with_label_values(vals)?, inc: self.inc.get_metric_with_label_values(vals)?,
dec: self.dec.get_metric_with_label_values(vals)?, dec: self.dec.get_metric_with_label_values(vals)?,
@@ -346,7 +201,7 @@ impl<P: Atomic> GenericCounterPairVec<P> {
self.get_metric_with_label_values(vals).unwrap() self.get_metric_with_label_values(vals).unwrap()
} }
pub fn remove_label_values(&self, res: &mut [prometheus::Result<()>; 2], vals: &[&str]) { pub fn remove_label_values(&self, res: &mut [Result<()>; 2], vals: &[&str]) {
res[0] = self.inc.remove_label_values(vals); res[0] = self.inc.remove_label_values(vals);
res[1] = self.dec.remove_label_values(vals); res[1] = self.dec.remove_label_values(vals);
} }
@@ -430,171 +285,3 @@ pub type IntCounterPair = GenericCounterPair<AtomicU64>;
/// A guard for [`IntCounterPair`] that will decrement the gauge on drop /// A guard for [`IntCounterPair`] that will decrement the gauge on drop
pub type IntCounterPairGuard = GenericCounterPairGuard<AtomicU64>; pub type IntCounterPairGuard = GenericCounterPairGuard<AtomicU64>;
pub trait CounterPairAssoc {
const INC_NAME: &'static MetricName;
const DEC_NAME: &'static MetricName;
const INC_HELP: &'static str;
const DEC_HELP: &'static str;
type LabelGroupSet: LabelGroupSet;
}
pub struct CounterPairVec<A: CounterPairAssoc> {
vec: measured::metric::MetricVec<MeasuredCounterPairState, A::LabelGroupSet>,
}
impl<A: CounterPairAssoc> Default for CounterPairVec<A>
where
A::LabelGroupSet: Default,
{
fn default() -> Self {
Self {
vec: Default::default(),
}
}
}
impl<A: CounterPairAssoc> CounterPairVec<A> {
pub fn guard(
&self,
labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>,
) -> MeasuredCounterPairGuard<'_, A> {
let id = self.vec.with_labels(labels);
self.vec.get_metric(id).inc.inc();
MeasuredCounterPairGuard { vec: &self.vec, id }
}
pub fn inc(&self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>) {
let id = self.vec.with_labels(labels);
self.vec.get_metric(id).inc.inc();
}
pub fn dec(&self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>) {
let id = self.vec.with_labels(labels);
self.vec.get_metric(id).dec.inc();
}
pub fn remove_metric(
&self,
labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>,
) -> Option<MeasuredCounterPairState> {
let id = self.vec.with_labels(labels);
self.vec.remove_metric(id)
}
}
impl<T, A> ::measured::metric::group::MetricGroup<T> for CounterPairVec<A>
where
T: ::measured::metric::group::Encoding,
A: CounterPairAssoc,
::measured::metric::counter::CounterState: ::measured::metric::MetricEncoding<T>,
{
fn collect_group_into(&self, enc: &mut T) -> Result<(), T::Err> {
// write decrement first to avoid a race condition where inc - dec < 0
T::write_help(enc, A::DEC_NAME, A::DEC_HELP)?;
self.vec
.collect_family_into(A::DEC_NAME, &mut Dec(&mut *enc))?;
T::write_help(enc, A::INC_NAME, A::INC_HELP)?;
self.vec
.collect_family_into(A::INC_NAME, &mut Inc(&mut *enc))?;
Ok(())
}
}
#[derive(MetricGroup, Default)]
pub struct MeasuredCounterPairState {
pub inc: CounterState,
pub dec: CounterState,
}
impl measured::metric::MetricType for MeasuredCounterPairState {
type Metadata = ();
}
pub struct MeasuredCounterPairGuard<'a, A: CounterPairAssoc> {
vec: &'a measured::metric::MetricVec<MeasuredCounterPairState, A::LabelGroupSet>,
id: measured::metric::LabelId<A::LabelGroupSet>,
}
impl<A: CounterPairAssoc> Drop for MeasuredCounterPairGuard<'_, A> {
fn drop(&mut self) {
self.vec.get_metric(self.id).dec.inc();
}
}
/// [`MetricEncoding`] for [`MeasuredCounterPairState`] that only writes the inc counter to the inner encoder.
struct Inc<T>(T);
/// [`MetricEncoding`] for [`MeasuredCounterPairState`] that only writes the dec counter to the inner encoder.
struct Dec<T>(T);
impl<T: Encoding> Encoding for Inc<T> {
type Err = T::Err;
fn write_help(&mut self, name: impl MetricNameEncoder, help: &str) -> Result<(), Self::Err> {
self.0.write_help(name, help)
}
fn write_metric_value(
&mut self,
name: impl MetricNameEncoder,
labels: impl LabelGroup,
value: MetricValue,
) -> Result<(), Self::Err> {
self.0.write_metric_value(name, labels, value)
}
}
impl<T: Encoding> MetricEncoding<Inc<T>> for MeasuredCounterPairState
where
CounterState: MetricEncoding<T>,
{
fn write_type(name: impl MetricNameEncoder, enc: &mut Inc<T>) -> Result<(), T::Err> {
CounterState::write_type(name, &mut enc.0)
}
fn collect_into(
&self,
metadata: &(),
labels: impl LabelGroup,
name: impl MetricNameEncoder,
enc: &mut Inc<T>,
) -> Result<(), T::Err> {
self.inc.collect_into(metadata, labels, name, &mut enc.0)
}
}
impl<T: Encoding> Encoding for Dec<T> {
type Err = T::Err;
fn write_help(&mut self, name: impl MetricNameEncoder, help: &str) -> Result<(), Self::Err> {
self.0.write_help(name, help)
}
fn write_metric_value(
&mut self,
name: impl MetricNameEncoder,
labels: impl LabelGroup,
value: MetricValue,
) -> Result<(), Self::Err> {
self.0.write_metric_value(name, labels, value)
}
}
/// Write the dec counter to the encoder
impl<T: Encoding> MetricEncoding<Dec<T>> for MeasuredCounterPairState
where
CounterState: MetricEncoding<T>,
{
fn write_type(name: impl MetricNameEncoder, enc: &mut Dec<T>) -> Result<(), T::Err> {
CounterState::write_type(name, &mut enc.0)
}
fn collect_into(
&self,
metadata: &(),
labels: impl LabelGroup,
name: impl MetricNameEncoder,
enc: &mut Dec<T>,
) -> Result<(), T::Err> {
self.dec.collect_into(metadata, labels, name, &mut enc.0)
}
}

View File

@@ -2,9 +2,9 @@ use std::str::FromStr;
/// Request/response types for the storage controller /// Request/response types for the storage controller
/// API (`/control/v1` prefix). Implemented by the server /// API (`/control/v1` prefix). Implemented by the server
/// in [`storage_controller::http`] /// in [`attachment_service::http`]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use utils::id::{NodeId, TenantId}; use utils::id::NodeId;
use crate::{ use crate::{
models::{ShardParameters, TenantConfig}, models::{ShardParameters, TenantConfig},
@@ -42,12 +42,6 @@ pub struct NodeConfigureRequest {
pub scheduling: Option<NodeSchedulingPolicy>, pub scheduling: Option<NodeSchedulingPolicy>,
} }
#[derive(Serialize, Deserialize)]
pub struct TenantPolicyRequest {
pub placement: Option<PlacementPolicy>,
pub scheduling: Option<ShardSchedulingPolicy>,
}
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct TenantLocateResponseShard { pub struct TenantLocateResponseShard {
pub shard_id: TenantShardId, pub shard_id: TenantShardId,
@@ -68,27 +62,12 @@ pub struct TenantLocateResponse {
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct TenantDescribeResponse { pub struct TenantDescribeResponse {
pub tenant_id: TenantId,
pub shards: Vec<TenantDescribeResponseShard>, pub shards: Vec<TenantDescribeResponseShard>,
pub stripe_size: ShardStripeSize, pub stripe_size: ShardStripeSize,
pub policy: PlacementPolicy, pub policy: PlacementPolicy,
pub config: TenantConfig, pub config: TenantConfig,
} }
#[derive(Serialize, Deserialize)]
pub struct NodeDescribeResponse {
pub id: NodeId,
pub availability: NodeAvailabilityWrapper,
pub scheduling: NodeSchedulingPolicy,
pub listen_http_addr: String,
pub listen_http_port: u16,
pub listen_pg_addr: String,
pub listen_pg_port: u16,
}
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct TenantDescribeResponseShard { pub struct TenantDescribeResponseShard {
pub tenant_shard_id: TenantShardId, pub tenant_shard_id: TenantShardId,
@@ -104,8 +83,6 @@ pub struct TenantDescribeResponseShard {
pub is_pending_compute_notification: bool, pub is_pending_compute_notification: bool,
/// A shard split is currently underway /// A shard split is currently underway
pub is_splitting: bool, pub is_splitting: bool,
pub scheduling_policy: ShardSchedulingPolicy,
} }
/// Explicitly migrating a particular shard is a low level operation /// Explicitly migrating a particular shard is a low level operation
@@ -120,7 +97,7 @@ pub struct TenantShardMigrateRequest {
/// Utilisation score indicating how good a candidate a pageserver /// Utilisation score indicating how good a candidate a pageserver
/// is for scheduling the next tenant. See [`crate::models::PageserverUtilization`]. /// is for scheduling the next tenant. See [`crate::models::PageserverUtilization`].
/// Lower values are better. /// Lower values are better.
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Debug)] #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, PartialOrd, Ord)]
pub struct UtilizationScore(pub u64); pub struct UtilizationScore(pub u64);
impl UtilizationScore { impl UtilizationScore {
@@ -129,7 +106,7 @@ impl UtilizationScore {
} }
} }
#[derive(Serialize, Deserialize, Clone, Copy, Debug)] #[derive(Serialize, Clone, Copy)]
#[serde(into = "NodeAvailabilityWrapper")] #[serde(into = "NodeAvailabilityWrapper")]
pub enum NodeAvailability { pub enum NodeAvailability {
// Normal, happy state // Normal, happy state
@@ -152,7 +129,7 @@ impl Eq for NodeAvailability {}
// This wrapper provides serde functionality and it should only be used to // This wrapper provides serde functionality and it should only be used to
// communicate with external callers which don't know or care about the // communicate with external callers which don't know or care about the
// utilisation score of the pageserver it is targeting. // utilisation score of the pageserver it is targeting.
#[derive(Serialize, Deserialize, Clone, Copy, Debug)] #[derive(Serialize, Deserialize, Clone)]
pub enum NodeAvailabilityWrapper { pub enum NodeAvailabilityWrapper {
Active, Active,
Offline, Offline,
@@ -178,33 +155,22 @@ impl From<NodeAvailability> for NodeAvailabilityWrapper {
} }
} }
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)] impl FromStr for NodeAvailability {
pub enum ShardSchedulingPolicy { type Err = anyhow::Error;
// Normal mode: the tenant's scheduled locations may be updated at will, including
// for non-essential optimization.
Active,
// Disable optimizations, but permit scheduling when necessary to fulfil the PlacementPolicy. fn from_str(s: &str) -> Result<Self, Self::Err> {
// For example, this still permits a node's attachment location to change to a secondary in match s {
// response to a node failure, or to assign a new secondary if a node was removed. // This is used when parsing node configuration requests from neon-local.
Essential, // Assume the worst possible utilisation score
// and let it get updated via the heartbeats.
// No scheduling: leave the shard running wherever it currently is. Even if the shard is "active" => Ok(Self::Active(UtilizationScore::worst())),
// unavailable, it will not be rescheduled to another node. "offline" => Ok(Self::Offline),
Pause, _ => Err(anyhow::anyhow!("Unknown availability state '{s}'")),
}
// No reconciling: we will make no location_conf API calls to pageservers at all. If the
// shard is unavailable, it stays that way. If a node fails, this shard doesn't get failed over.
Stop,
}
impl Default for ShardSchedulingPolicy {
fn default() -> Self {
Self::Active
} }
} }
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)] #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq)]
pub enum NodeSchedulingPolicy { pub enum NodeSchedulingPolicy {
Active, Active,
Filling, Filling,

View File

@@ -1,6 +1,5 @@
use anyhow::{bail, Result}; use anyhow::{bail, Result};
use byteorder::{ByteOrder, BE}; use byteorder::{ByteOrder, BE};
use bytes::BufMut;
use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
use postgres_ffi::{Oid, TransactionId}; use postgres_ffi::{Oid, TransactionId};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -22,107 +21,15 @@ pub struct Key {
pub field6: u32, pub field6: u32,
} }
/// The storage key size.
pub const KEY_SIZE: usize = 18; pub const KEY_SIZE: usize = 18;
/// The metadata key size. 2B fewer than the storage key size because field2 is not fully utilized.
/// See [`Key::to_i128`] for more information on the encoding.
pub const METADATA_KEY_SIZE: usize = 16;
/// The key prefix start range for the metadata keys. All keys with the first byte >= 0x40 is a metadata key.
pub const METADATA_KEY_BEGIN_PREFIX: u8 = 0x60;
pub const METADATA_KEY_END_PREFIX: u8 = 0x7F;
/// The (reserved) key prefix of relation sizes.
pub const RELATION_SIZE_PREFIX: u8 = 0x61;
/// The key prefix of AUX file keys.
pub const AUX_KEY_PREFIX: u8 = 0x62;
/// Check if the key falls in the range of metadata keys.
pub const fn is_metadata_key_slice(key: &[u8]) -> bool {
key[0] >= METADATA_KEY_BEGIN_PREFIX && key[0] < METADATA_KEY_END_PREFIX
}
impl Key { impl Key {
/// Check if the key falls in the range of metadata keys.
pub const fn is_metadata_key(&self) -> bool {
self.field1 >= METADATA_KEY_BEGIN_PREFIX && self.field1 < METADATA_KEY_END_PREFIX
}
/// Encode a metadata key to a storage key.
pub fn from_metadata_key_fixed_size(key: &[u8; METADATA_KEY_SIZE]) -> Self {
assert!(is_metadata_key_slice(key), "key not in metadata key range");
Key {
field1: key[0],
field2: u16::from_be_bytes(key[1..3].try_into().unwrap()) as u32,
field3: u32::from_be_bytes(key[3..7].try_into().unwrap()),
field4: u32::from_be_bytes(key[7..11].try_into().unwrap()),
field5: key[11],
field6: u32::from_be_bytes(key[12..16].try_into().unwrap()),
}
}
/// Encode a metadata key to a storage key.
pub fn from_metadata_key(key: &[u8]) -> Self {
Self::from_metadata_key_fixed_size(key.try_into().expect("expect 16 byte metadata key"))
}
/// Extract a metadata key to a writer. The result should always be 16 bytes.
pub fn extract_metadata_key_to_writer(&self, mut writer: impl BufMut) {
writer.put_u8(self.field1);
assert!(self.field2 <= 0xFFFF);
writer.put_u16(self.field2 as u16);
writer.put_u32(self.field3);
writer.put_u32(self.field4);
writer.put_u8(self.field5);
writer.put_u32(self.field6);
}
/// Get the range of metadata keys.
pub fn metadata_key_range() -> Range<Self> {
Key {
field1: METADATA_KEY_BEGIN_PREFIX,
field2: 0,
field3: 0,
field4: 0,
field5: 0,
field6: 0,
}..Key {
field1: METADATA_KEY_END_PREFIX,
field2: 0,
field3: 0,
field4: 0,
field5: 0,
field6: 0,
}
}
/// Get the range of aux keys.
pub fn metadata_aux_key_range() -> Range<Self> {
Key {
field1: AUX_KEY_PREFIX,
field2: 0,
field3: 0,
field4: 0,
field5: 0,
field6: 0,
}..Key {
field1: AUX_KEY_PREFIX + 1,
field2: 0,
field3: 0,
field4: 0,
field5: 0,
field6: 0,
}
}
/// 'field2' is used to store tablespaceid for relations and small enum numbers for other relish. /// 'field2' is used to store tablespaceid for relations and small enum numbers for other relish.
/// As long as Neon does not support tablespace (because of lack of access to local file system), /// As long as Neon does not support tablespace (because of lack of access to local file system),
/// we can assume that only some predefined namespace OIDs are used which can fit in u16 /// we can assume that only some predefined namespace OIDs are used which can fit in u16
pub fn to_i128(&self) -> i128 { pub fn to_i128(&self) -> i128 {
assert!(self.field2 < 0xFFFF || self.field2 == 0xFFFFFFFF || self.field2 == 0x22222222); assert!(self.field2 < 0xFFFF || self.field2 == 0xFFFFFFFF || self.field2 == 0x22222222);
(((self.field1 & 0x7F) as i128) << 120) (((self.field1 & 0xf) as i128) << 120)
| (((self.field2 & 0xFFFF) as i128) << 104) | (((self.field2 & 0xFFFF) as i128) << 104)
| ((self.field3 as i128) << 72) | ((self.field3 as i128) << 72)
| ((self.field4 as i128) << 40) | ((self.field4 as i128) << 40)
@@ -132,7 +39,7 @@ impl Key {
pub const fn from_i128(x: i128) -> Self { pub const fn from_i128(x: i128) -> Self {
Key { Key {
field1: ((x >> 120) & 0x7F) as u8, field1: ((x >> 120) & 0xf) as u8,
field2: ((x >> 104) & 0xFFFF) as u32, field2: ((x >> 104) & 0xFFFF) as u32,
field3: (x >> 72) as u32, field3: (x >> 72) as u32,
field4: (x >> 40) as u32, field4: (x >> 40) as u32,
@@ -141,11 +48,11 @@ impl Key {
} }
} }
pub const fn next(&self) -> Key { pub fn next(&self) -> Key {
self.add(1) self.add(1)
} }
pub const fn add(&self, x: u32) -> Key { pub fn add(&self, x: u32) -> Key {
let mut key = *self; let mut key = *self;
let r = key.field6.overflowing_add(x); let r = key.field6.overflowing_add(x);
@@ -174,8 +81,6 @@ impl Key {
key key
} }
/// Convert a 18B slice to a key. This function should not be used for metadata keys because field2 is handled differently.
/// Use [`Key::from_metadata_key`] instead.
pub fn from_slice(b: &[u8]) -> Self { pub fn from_slice(b: &[u8]) -> Self {
Key { Key {
field1: b[0], field1: b[0],
@@ -187,8 +92,6 @@ impl Key {
} }
} }
/// Convert a key to a 18B slice. This function should not be used for metadata keys because field2 is handled differently.
/// Use [`Key::extract_metadata_key_to_writer`] instead.
pub fn write_to_byte_slice(&self, buf: &mut [u8]) { pub fn write_to_byte_slice(&self, buf: &mut [u8]) {
buf[0] = self.field1; buf[0] = self.field1;
BE::write_u32(&mut buf[1..5], self.field2); BE::write_u32(&mut buf[1..5], self.field2);
@@ -572,14 +475,12 @@ pub const AUX_FILES_KEY: Key = Key {
// Reverse mappings for a few Keys. // Reverse mappings for a few Keys.
// These are needed by WAL redo manager. // These are needed by WAL redo manager.
pub const NON_INHERITED_RANGE: Range<Key> = AUX_FILES_KEY..AUX_FILES_KEY.next();
// AUX_FILES currently stores only data for logical replication (slots etc), and // AUX_FILES currently stores only data for logical replication (slots etc), and
// we don't preserve these on a branch because safekeepers can't follow timeline // we don't preserve these on a branch because safekeepers can't follow timeline
// switch (and generally it likely should be optional), so ignore these. // switch (and generally it likely should be optional), so ignore these.
#[inline(always)] #[inline(always)]
pub fn is_inherited_key(key: Key) -> bool { pub fn is_inherited_key(key: Key) -> bool {
!NON_INHERITED_RANGE.contains(&key) key != AUX_FILES_KEY
} }
#[inline(always)] #[inline(always)]
@@ -655,14 +556,11 @@ impl std::str::FromStr for Key {
mod tests { mod tests {
use std::str::FromStr; use std::str::FromStr;
use crate::key::is_metadata_key_slice;
use crate::key::Key; use crate::key::Key;
use rand::Rng; use rand::Rng;
use rand::SeedableRng; use rand::SeedableRng;
use super::AUX_KEY_PREFIX;
#[test] #[test]
fn display_fromstr_bijection() { fn display_fromstr_bijection() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42); let mut rng = rand::rngs::StdRng::seed_from_u64(42);
@@ -678,16 +576,4 @@ mod tests {
assert_eq!(key, Key::from_str(&format!("{key}")).unwrap()); assert_eq!(key, Key::from_str(&format!("{key}")).unwrap());
} }
#[test]
fn test_metadata_keys() {
let mut metadata_key = vec![AUX_KEY_PREFIX];
metadata_key.extend_from_slice(&[0xFF; 15]);
let encoded_key = Key::from_metadata_key(&metadata_key);
let mut output_key = Vec::new();
encoded_key.extract_metadata_key_to_writer(&mut output_key);
assert_eq!(metadata_key, output_key);
assert!(encoded_key.is_metadata_key());
assert!(is_metadata_key_slice(&metadata_key));
}
} }

View File

@@ -1,10 +1,7 @@
use postgres_ffi::BLCKSZ; use postgres_ffi::BLCKSZ;
use std::ops::Range; use std::ops::Range;
use crate::{ use crate::key::Key;
key::Key,
shard::{ShardCount, ShardIdentity},
};
use itertools::Itertools; use itertools::Itertools;
/// ///
@@ -17,279 +14,44 @@ pub struct KeySpace {
pub ranges: Vec<Range<Key>>, pub ranges: Vec<Range<Key>>,
} }
/// A wrapper type for sparse keyspaces.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct SparseKeySpace(pub KeySpace);
/// Represents a contiguous half-open range of the keyspace, masked according to a particular
/// ShardNumber's stripes: within this range of keys, only some "belong" to the current
/// shard.
///
/// When we iterate over keys within this object, we will skip any keys that don't belong
/// to this shard.
///
/// The start + end keys may not belong to the shard: these specify where layer files should
/// start + end, but we will never actually read/write those keys.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ShardedRange<'a> {
pub shard_identity: &'a ShardIdentity,
pub range: Range<Key>,
}
// Calculate the size of a range within the blocks of the same relation, or spanning only the
// top page in the previous relation's space.
fn contiguous_range_len(range: &Range<Key>) -> u32 {
debug_assert!(is_contiguous_range(range));
if range.start.field6 == 0xffffffff {
range.end.field6 + 1
} else {
range.end.field6 - range.start.field6
}
}
/// Return true if this key range includes only keys in the same relation's data blocks, or
/// just spanning one relation and the logical size (0xffffffff) block of the relation before it.
///
/// Contiguous in this context means we know the keys are in use _somewhere_, but it might not
/// be on our shard. Later in ShardedRange we do the extra work to figure out how much
/// of a given contiguous range is present on one shard.
///
/// This matters, because:
/// - Within such ranges, keys are used contiguously. Outside such ranges it is sparse.
/// - Within such ranges, we may calculate distances using simple subtraction of field6.
fn is_contiguous_range(range: &Range<Key>) -> bool {
range.start.field1 == range.end.field1
&& range.start.field2 == range.end.field2
&& range.start.field3 == range.end.field3
&& range.start.field4 == range.end.field4
&& (range.start.field5 == range.end.field5
|| (range.start.field6 == 0xffffffff && range.start.field5 + 1 == range.end.field5))
}
impl<'a> ShardedRange<'a> {
pub fn new(range: Range<Key>, shard_identity: &'a ShardIdentity) -> Self {
Self {
shard_identity,
range,
}
}
/// Break up this range into chunks, each of which has at least one local key in it if the
/// total range has at least one local key.
pub fn fragment(self, target_nblocks: u32) -> Vec<(u32, Range<Key>)> {
// Optimization for single-key case (e.g. logical size keys)
if self.range.end == self.range.start.add(1) {
return vec![(
if self.shard_identity.is_key_disposable(&self.range.start) {
0
} else {
1
},
self.range,
)];
}
if !is_contiguous_range(&self.range) {
// Ranges that span relations are not fragmented. We only get these ranges as a result
// of operations that act on existing layers, so we trust that the existing range is
// reasonably small.
return vec![(u32::MAX, self.range)];
}
let mut fragments: Vec<(u32, Range<Key>)> = Vec::new();
let mut cursor = self.range.start;
while cursor < self.range.end {
let advance_by = self.distance_to_next_boundary(cursor);
let is_fragment_disposable = self.shard_identity.is_key_disposable(&cursor);
// If the previous fragment is undersized, then we seek to consume enough
// blocks to complete it.
let (want_blocks, merge_last_fragment) = match fragments.last_mut() {
Some(frag) if frag.0 < target_nblocks => (target_nblocks - frag.0, Some(frag)),
Some(frag) => {
// Prev block is complete, want the full number.
(
target_nblocks,
if is_fragment_disposable {
// If this current range will be empty (not shard-local data), we will merge into previous
Some(frag)
} else {
None
},
)
}
None => {
// First iteration, want the full number
(target_nblocks, None)
}
};
let advance_by = if is_fragment_disposable {
advance_by
} else {
std::cmp::min(advance_by, want_blocks)
};
let next_cursor = cursor.add(advance_by);
let this_frag = (
if is_fragment_disposable {
0
} else {
advance_by
},
cursor..next_cursor,
);
cursor = next_cursor;
if let Some(last_fragment) = merge_last_fragment {
// Previous fragment was short or this one is empty, merge into it
last_fragment.0 += this_frag.0;
last_fragment.1.end = this_frag.1.end;
} else {
fragments.push(this_frag);
}
}
fragments
}
/// Estimate the physical pages that are within this range, on this shard. This returns
/// u32::MAX if the range spans relations: this return value should be interpreted as "large".
pub fn page_count(&self) -> u32 {
// Special cases for single keys like logical sizes
if self.range.end == self.range.start.add(1) {
return if self.shard_identity.is_key_disposable(&self.range.start) {
0
} else {
1
};
}
// We can only do an authentic calculation of contiguous key ranges
if !is_contiguous_range(&self.range) {
return u32::MAX;
}
// Special case for single sharded tenants: our logical and physical sizes are the same
if self.shard_identity.count < ShardCount::new(2) {
return contiguous_range_len(&self.range);
}
// Normal path: step through stripes and part-stripes in the range, evaluate whether each one belongs
// to Self, and add the stripe's block count to our total if so.
let mut result: u64 = 0;
let mut cursor = self.range.start;
while cursor < self.range.end {
// Count up to the next stripe_size boundary or end of range
let advance_by = self.distance_to_next_boundary(cursor);
// If this blocks in this stripe belong to us, add them to our count
if !self.shard_identity.is_key_disposable(&cursor) {
result += advance_by as u64;
}
cursor = cursor.add(advance_by);
}
if result > u32::MAX as u64 {
u32::MAX
} else {
result as u32
}
}
/// Advance the cursor to the next potential fragment boundary: this is either
/// a stripe boundary, or the end of the range.
fn distance_to_next_boundary(&self, cursor: Key) -> u32 {
let distance_to_range_end = contiguous_range_len(&(cursor..self.range.end));
if self.shard_identity.count < ShardCount::new(2) {
// Optimization: don't bother stepping through stripes if the tenant isn't sharded.
return distance_to_range_end;
}
if cursor.field6 == 0xffffffff {
// We are wrapping from one relation's logical size to the next relation's first data block
return 1;
}
let stripe_index = cursor.field6 / self.shard_identity.stripe_size.0;
let stripe_remainder = self.shard_identity.stripe_size.0
- (cursor.field6 - stripe_index * self.shard_identity.stripe_size.0);
if cfg!(debug_assertions) {
// We should never overflow field5 and field6 -- our callers check this earlier
// and would have returned their u32::MAX cases if the input range violated this.
let next_cursor = cursor.add(stripe_remainder);
debug_assert!(
next_cursor.field1 == cursor.field1
&& next_cursor.field2 == cursor.field2
&& next_cursor.field3 == cursor.field3
&& next_cursor.field4 == cursor.field4
&& next_cursor.field5 == cursor.field5
)
}
std::cmp::min(stripe_remainder, distance_to_range_end)
}
/// Whereas `page_count` estimates the number of pages physically in this range on this shard,
/// this function simply calculates the number of pages in the space, without accounting for those
/// pages that would not actually be stored on this node.
///
/// Don't use this function in code that works with physical entities like layer files.
fn raw_size(range: &Range<Key>) -> u32 {
if is_contiguous_range(range) {
contiguous_range_len(range)
} else {
u32::MAX
}
}
}
impl KeySpace { impl KeySpace {
/// Create a key space with a single range. ///
pub fn single(key_range: Range<Key>) -> Self {
Self {
ranges: vec![key_range],
}
}
/// Partition a key space into roughly chunks of roughly 'target_size' bytes /// Partition a key space into roughly chunks of roughly 'target_size' bytes
/// in each partition. /// in each partition.
/// ///
pub fn partition(&self, shard_identity: &ShardIdentity, target_size: u64) -> KeyPartitioning { pub fn partition(&self, target_size: u64) -> KeyPartitioning {
// Assume that each value is 8k in size. // Assume that each value is 8k in size.
let target_nblocks = (target_size / BLCKSZ as u64) as u32; let target_nblocks = (target_size / BLCKSZ as u64) as usize;
let mut parts = Vec::new(); let mut parts = Vec::new();
let mut current_part = Vec::new(); let mut current_part = Vec::new();
let mut current_part_size: usize = 0; let mut current_part_size: usize = 0;
for range in &self.ranges { for range in &self.ranges {
// While doing partitioning, wrap the range in ShardedRange so that our size calculations // If appending the next contiguous range in the keyspace to the current
// will respect shard striping rather than assuming all keys within a range are present. // partition would cause it to be too large, start a new partition.
let range = ShardedRange::new(range.clone(), shard_identity); let this_size = key_range_size(range) as usize;
if current_part_size + this_size > target_nblocks && !current_part.is_empty() {
// Chunk up the range into parts that each contain up to target_size local blocks parts.push(KeySpace {
for (frag_on_shard_size, frag_range) in range.fragment(target_nblocks) { ranges: current_part,
// If appending the next contiguous range in the keyspace to the current });
// partition would cause it to be too large, and our current partition current_part = Vec::new();
// covers at least one block that is physically present in this shard, current_part_size = 0;
// then start a new partition
if current_part_size + frag_on_shard_size as usize > target_nblocks as usize
&& current_part_size > 0
{
parts.push(KeySpace {
ranges: current_part,
});
current_part = Vec::new();
current_part_size = 0;
}
current_part.push(frag_range.start..frag_range.end);
current_part_size += frag_on_shard_size as usize;
} }
// If the next range is larger than 'target_size', split it into
// 'target_size' chunks.
let mut remain_size = this_size;
let mut start = range.start;
while remain_size > target_nblocks {
let next = start.add(target_nblocks as u32);
parts.push(KeySpace {
ranges: vec![start..next],
});
start = next;
remain_size -= target_nblocks
}
current_part.push(start..range.end);
current_part_size += remain_size;
} }
// add last partition that wasn't full yet. // add last partition that wasn't full yet.
@@ -302,10 +64,6 @@ impl KeySpace {
KeyPartitioning { parts } KeyPartitioning { parts }
} }
pub fn is_empty(&self) -> bool {
self.total_raw_size() == 0
}
/// Merge another keyspace into the current one. /// Merge another keyspace into the current one.
/// Note: the keyspaces must not ovelap (enforced via assertions) /// Note: the keyspaces must not ovelap (enforced via assertions)
pub fn merge(&mut self, other: &KeySpace) { pub fn merge(&mut self, other: &KeySpace) {
@@ -336,13 +94,12 @@ impl KeySpace {
/// Remove all keys in `other` from `self`. /// Remove all keys in `other` from `self`.
/// This can involve splitting or removing of existing ranges. /// This can involve splitting or removing of existing ranges.
/// Returns the removed keyspace pub fn remove_overlapping_with(&mut self, other: &KeySpace) {
pub fn remove_overlapping_with(&mut self, other: &KeySpace) -> KeySpace {
let (self_start, self_end) = match (self.start(), self.end()) { let (self_start, self_end) = match (self.start(), self.end()) {
(Some(start), Some(end)) => (start, end), (Some(start), Some(end)) => (start, end),
_ => { _ => {
// self is empty // self is empty
return KeySpace::default(); return;
} }
}; };
@@ -355,37 +112,30 @@ impl KeySpace {
.skip_while(|range| self_start >= range.end) .skip_while(|range| self_start >= range.end)
.take_while(|range| self_end > range.start); .take_while(|range| self_end > range.start);
let mut removed_accum = KeySpaceRandomAccum::new();
for range in other_ranges { for range in other_ranges {
while let Some(overlap_at) = self.overlaps_at(range) { while let Some(overlap_at) = self.overlaps_at(range) {
let overlapped = self.ranges[overlap_at].clone(); let overlapped = self.ranges[overlap_at].clone();
if overlapped.start < range.start && overlapped.end <= range.end { if overlapped.start < range.start && overlapped.end <= range.end {
// Higher part of the range is completely overlapped. // Higher part of the range is completely overlapped.
removed_accum.add_range(range.start..self.ranges[overlap_at].end);
self.ranges[overlap_at].end = range.start; self.ranges[overlap_at].end = range.start;
} }
if overlapped.start >= range.start && overlapped.end > range.end { if overlapped.start >= range.start && overlapped.end > range.end {
// Lower part of the range is completely overlapped. // Lower part of the range is completely overlapped.
removed_accum.add_range(self.ranges[overlap_at].start..range.end);
self.ranges[overlap_at].start = range.end; self.ranges[overlap_at].start = range.end;
} }
if overlapped.start < range.start && overlapped.end > range.end { if overlapped.start < range.start && overlapped.end > range.end {
// Middle part of the range is overlapped. // Middle part of the range is overlapped.
removed_accum.add_range(range.clone());
self.ranges[overlap_at].end = range.start; self.ranges[overlap_at].end = range.start;
self.ranges self.ranges
.insert(overlap_at + 1, range.end..overlapped.end); .insert(overlap_at + 1, range.end..overlapped.end);
} }
if overlapped.start >= range.start && overlapped.end <= range.end { if overlapped.start >= range.start && overlapped.end <= range.end {
// Whole range is overlapped // Whole range is overlapped
removed_accum.add_range(self.ranges[overlap_at].clone());
self.ranges.remove(overlap_at); self.ranges.remove(overlap_at);
} }
} }
} }
removed_accum.to_keyspace()
} }
pub fn start(&self) -> Option<Key> { pub fn start(&self) -> Option<Key> {
@@ -396,11 +146,11 @@ impl KeySpace {
self.ranges.last().map(|range| range.end) self.ranges.last().map(|range| range.end)
} }
/// The size of the keyspace in pages, before accounting for sharding #[allow(unused)]
pub fn total_raw_size(&self) -> usize { pub fn total_size(&self) -> usize {
self.ranges self.ranges
.iter() .iter()
.map(|range| ShardedRange::raw_size(range) as usize) .map(|range| key_range_size(range) as usize)
.sum() .sum()
} }
@@ -420,11 +170,6 @@ impl KeySpace {
pub fn overlaps(&self, range: &Range<Key>) -> bool { pub fn overlaps(&self, range: &Range<Key>) -> bool {
self.overlaps_at(range).is_some() self.overlaps_at(range).is_some()
} }
/// Check if the keyspace contains a key
pub fn contains(&self, key: &Key) -> bool {
self.overlaps(&(*key..key.next()))
}
} }
/// ///
@@ -439,33 +184,10 @@ pub struct KeyPartitioning {
pub parts: Vec<KeySpace>, pub parts: Vec<KeySpace>,
} }
/// Represents a partitioning of the sparse key space.
#[derive(Clone, Debug, Default)]
pub struct SparseKeyPartitioning {
pub parts: Vec<SparseKeySpace>,
}
impl KeyPartitioning { impl KeyPartitioning {
pub fn new() -> Self { pub fn new() -> Self {
KeyPartitioning { parts: Vec::new() } KeyPartitioning { parts: Vec::new() }
} }
/// Convert a key partitioning to a sparse partition.
pub fn into_sparse(self) -> SparseKeyPartitioning {
SparseKeyPartitioning {
parts: self.parts.into_iter().map(SparseKeySpace).collect(),
}
}
}
impl SparseKeyPartitioning {
/// Note: use this function with caution. Attempt to handle a sparse keyspace in the same way as a dense keyspace will
/// cause long/dead loops.
pub fn into_dense(self) -> KeyPartitioning {
KeyPartitioning {
parts: self.parts.into_iter().map(|x| x.0).collect(),
}
}
} }
/// ///
@@ -497,7 +219,7 @@ impl KeySpaceAccum {
#[inline(always)] #[inline(always)]
pub fn add_range(&mut self, range: Range<Key>) { pub fn add_range(&mut self, range: Range<Key>) {
self.size += ShardedRange::raw_size(&range) as u64; self.size += key_range_size(&range) as u64;
match self.accum.as_mut() { match self.accum.as_mut() {
Some(accum) => { Some(accum) => {
@@ -529,9 +251,7 @@ impl KeySpaceAccum {
std::mem::take(self).to_keyspace() std::mem::take(self).to_keyspace()
} }
// The total number of keys in this object, ignoring any sharding effects that might cause some of pub fn size(&self) -> u64 {
// the keys to be omitted in storage on this shard.
pub fn raw_size(&self) -> u64 {
self.size self.size
} }
} }
@@ -587,19 +307,36 @@ impl KeySpaceRandomAccum {
} }
} }
#[inline(always)]
pub fn key_range_size(key_range: &Range<Key>) -> u32 {
let start = key_range.start;
let end = key_range.end;
if end.field1 != start.field1
|| end.field2 != start.field2
|| end.field3 != start.field3
|| end.field4 != start.field4
{
return u32::MAX;
}
let start = (start.field5 as u64) << 32 | start.field6 as u64;
let end = (end.field5 as u64) << 32 | end.field6 as u64;
let diff = end - start;
if diff > u32::MAX as u64 {
u32::MAX
} else {
diff as u32
}
}
pub fn singleton_range(key: Key) -> Range<Key> { pub fn singleton_range(key: Key) -> Range<Key> {
key..key.next() key..key.next()
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use rand::{RngCore, SeedableRng};
use crate::{
models::ShardParameters,
shard::{ShardCount, ShardNumber},
};
use super::*; use super::*;
use std::fmt::Write; use std::fmt::Write;
@@ -642,17 +379,14 @@ mod tests {
accum.add_range(range.clone()); accum.add_range(range.clone());
} }
let expected_size: u64 = ranges let expected_size: u64 = ranges.iter().map(|r| key_range_size(r) as u64).sum();
.iter() assert_eq!(accum.size(), expected_size);
.map(|r| ShardedRange::raw_size(r) as u64)
.sum();
assert_eq!(accum.raw_size(), expected_size);
assert_ks_eq(&accum.consume_keyspace(), ranges.clone()); assert_ks_eq(&accum.consume_keyspace(), ranges.clone());
assert_eq!(accum.raw_size(), 0); assert_eq!(accum.size(), 0);
assert_ks_eq(&accum.consume_keyspace(), vec![]); assert_ks_eq(&accum.consume_keyspace(), vec![]);
assert_eq!(accum.raw_size(), 0); assert_eq!(accum.size(), 0);
for range in &ranges { for range in &ranges {
accum.add_range(range.clone()); accum.add_range(range.clone());
@@ -819,16 +553,7 @@ mod tests {
Key::from_i128(11)..Key::from_i128(13), Key::from_i128(11)..Key::from_i128(13),
], ],
}; };
let removed = key_space1.remove_overlapping_with(&key_space2); key_space1.remove_overlapping_with(&key_space2);
let removed_expected = KeySpace {
ranges: vec![
Key::from_i128(2)..Key::from_i128(3),
Key::from_i128(6)..Key::from_i128(7),
Key::from_i128(11)..Key::from_i128(12),
],
};
assert_eq!(removed, removed_expected);
assert_eq!( assert_eq!(
key_space1.ranges, key_space1.ranges,
vec![ vec![
@@ -858,17 +583,7 @@ mod tests {
Key::from_i128(14)..Key::from_i128(17), Key::from_i128(14)..Key::from_i128(17),
], ],
}; };
key_space1.remove_overlapping_with(&key_space2);
let removed = key_space1.remove_overlapping_with(&key_space2);
let removed_expected = KeySpace {
ranges: vec![
Key::from_i128(3)..Key::from_i128(5),
Key::from_i128(8)..Key::from_i128(10),
Key::from_i128(14)..Key::from_i128(15),
],
};
assert_eq!(removed, removed_expected);
assert_eq!( assert_eq!(
key_space1.ranges, key_space1.ranges,
vec![ vec![
@@ -895,11 +610,7 @@ mod tests {
Key::from_i128(15)..Key::from_i128(17), Key::from_i128(15)..Key::from_i128(17),
], ],
}; };
key_space1.remove_overlapping_with(&key_space2);
let removed = key_space1.remove_overlapping_with(&key_space2);
let removed_expected = KeySpace::default();
assert_eq!(removed, removed_expected);
assert_eq!( assert_eq!(
key_space1.ranges, key_space1.ranges,
vec![ vec![
@@ -926,17 +637,7 @@ mod tests {
let key_space2 = KeySpace { let key_space2 = KeySpace {
ranges: vec![Key::from_i128(9)..Key::from_i128(19)], ranges: vec![Key::from_i128(9)..Key::from_i128(19)],
}; };
key_space1.remove_overlapping_with(&key_space2);
let removed = key_space1.remove_overlapping_with(&key_space2);
let removed_expected = KeySpace {
ranges: vec![
Key::from_i128(9)..Key::from_i128(10),
Key::from_i128(12)..Key::from_i128(15),
Key::from_i128(17)..Key::from_i128(19),
],
};
assert_eq!(removed, removed_expected);
assert_eq!( assert_eq!(
key_space1.ranges, key_space1.ranges,
vec![ vec![
@@ -949,412 +650,4 @@ mod tests {
] ]
); );
} }
#[test]
fn sharded_range_relation_gap() {
let shard_identity = ShardIdentity::new(
ShardNumber(0),
ShardCount::new(4),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap();
let range = ShardedRange::new(
Range {
start: Key::from_hex("000000067F00000005000040100300000000").unwrap(),
end: Key::from_hex("000000067F00000005000040130000004000").unwrap(),
},
&shard_identity,
);
// Key range spans relations, expect MAX
assert_eq!(range.page_count(), u32::MAX);
}
#[test]
fn shard_identity_keyspaces_single_key() {
let shard_identity = ShardIdentity::new(
ShardNumber(1),
ShardCount::new(4),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap();
let range = ShardedRange::new(
Range {
start: Key::from_hex("000000067f000000010000007000ffffffff").unwrap(),
end: Key::from_hex("000000067f00000001000000700100000000").unwrap(),
},
&shard_identity,
);
// Single-key range on logical size key
assert_eq!(range.page_count(), 1);
}
/// Test the helper that we use to identify ranges which go outside the data blocks of a single relation
#[test]
fn contiguous_range_check() {
assert!(!is_contiguous_range(
&(Key::from_hex("000000067f00000001000004df00fffffffe").unwrap()
..Key::from_hex("000000067f00000001000004df0100000003").unwrap())
),);
// The ranges goes all the way up to the 0xffffffff, including it: this is
// not considered a rel block range because 0xffffffff stores logical sizes,
// not blocks.
assert!(!is_contiguous_range(
&(Key::from_hex("000000067f00000001000004df00fffffffe").unwrap()
..Key::from_hex("000000067f00000001000004df0100000000").unwrap())
),);
// Keys within the normal data region of a relation
assert!(is_contiguous_range(
&(Key::from_hex("000000067f00000001000004df0000000000").unwrap()
..Key::from_hex("000000067f00000001000004df0000000080").unwrap())
),);
// The logical size key of one forkno, then some blocks in the next
assert!(is_contiguous_range(
&(Key::from_hex("000000067f00000001000004df00ffffffff").unwrap()
..Key::from_hex("000000067f00000001000004df0100000080").unwrap())
),);
}
#[test]
fn shard_identity_keyspaces_forkno_gap() {
let shard_identity = ShardIdentity::new(
ShardNumber(1),
ShardCount::new(4),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap();
let range = ShardedRange::new(
Range {
start: Key::from_hex("000000067f00000001000004df00fffffffe").unwrap(),
end: Key::from_hex("000000067f00000001000004df0100000003").unwrap(),
},
&shard_identity,
);
// Range spanning the end of one forkno and the start of the next: we do not attempt to
// calculate a valid size, because we have no way to know if they keys between start
// and end are actually in use.
assert_eq!(range.page_count(), u32::MAX);
}
#[test]
fn shard_identity_keyspaces_one_relation() {
for shard_number in 0..4 {
let shard_identity = ShardIdentity::new(
ShardNumber(shard_number),
ShardCount::new(4),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap();
let range = ShardedRange::new(
Range {
start: Key::from_hex("000000067f00000001000000ae0000000000").unwrap(),
end: Key::from_hex("000000067f00000001000000ae0000000001").unwrap(),
},
&shard_identity,
);
// Very simple case: range covering block zero of one relation, where that block maps to shard zero
if shard_number == 0 {
assert_eq!(range.page_count(), 1);
} else {
// Other shards should perceive the range's size as zero
assert_eq!(range.page_count(), 0);
}
}
}
/// Test helper: construct a ShardedRange and call fragment() on it, returning
/// the total page count in the range and the fragments.
fn do_fragment(
range_start: Key,
range_end: Key,
shard_identity: &ShardIdentity,
target_nblocks: u32,
) -> (u32, Vec<(u32, Range<Key>)>) {
let range = ShardedRange::new(
Range {
start: range_start,
end: range_end,
},
shard_identity,
);
let page_count = range.page_count();
let fragments = range.fragment(target_nblocks);
// Invariant: we always get at least one fragment
assert!(!fragments.is_empty());
// Invariant: the first/last fragment start/end should equal the input start/end
assert_eq!(fragments.first().unwrap().1.start, range_start);
assert_eq!(fragments.last().unwrap().1.end, range_end);
if page_count > 0 {
// Invariant: every fragment must contain at least one shard-local page, if the
// total range contains at least one shard-local page
let all_nonzero = fragments.iter().all(|f| f.0 > 0);
if !all_nonzero {
eprintln!("Found a zero-length fragment: {:?}", fragments);
}
assert!(all_nonzero);
} else {
// A range with no shard-local pages should always be returned as a single fragment
assert_eq!(fragments, vec![(0, range_start..range_end)]);
}
// Invariant: fragments must be ordered and non-overlapping
let mut last: Option<Range<Key>> = None;
for frag in &fragments {
if let Some(last) = last {
assert!(frag.1.start >= last.end);
assert!(frag.1.start > last.start);
}
last = Some(frag.1.clone())
}
// Invariant: fragments respect target_nblocks
for frag in &fragments {
assert!(frag.0 == u32::MAX || frag.0 <= target_nblocks);
}
(page_count, fragments)
}
/// Really simple tests for fragment(), on a range that just contains a single stripe
/// for a single tenant.
#[test]
fn sharded_range_fragment_simple() {
let shard_identity = ShardIdentity::new(
ShardNumber(0),
ShardCount::new(4),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap();
// A range which we happen to know covers exactly one stripe which belongs to this shard
let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
let input_end = Key::from_hex("000000067f00000001000000ae0000008000").unwrap();
// Ask for stripe_size blocks, we get the whole stripe
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 32768),
(32768, vec![(32768, input_start..input_end)])
);
// Ask for more, we still get the whole stripe
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 10000000),
(32768, vec![(32768, input_start..input_end)])
);
// Ask for target_nblocks of half the stripe size, we get two halves
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 16384),
(
32768,
vec![
(16384, input_start..input_start.add(16384)),
(16384, input_start.add(16384)..input_end)
]
)
);
}
#[test]
fn sharded_range_fragment_multi_stripe() {
let shard_identity = ShardIdentity::new(
ShardNumber(0),
ShardCount::new(4),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap();
// A range which covers multiple stripes, exactly one of which belongs to the current shard.
let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
let input_end = Key::from_hex("000000067f00000001000000ae0000020000").unwrap();
// Ask for all the blocks, get a fragment that covers the whole range but reports
// its size to be just the blocks belonging to our shard.
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 131072),
(32768, vec![(32768, input_start..input_end)])
);
// Ask for a sub-stripe quantity
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 16000),
(
32768,
vec![
(16000, input_start..input_start.add(16000)),
(16000, input_start.add(16000)..input_start.add(32000)),
(768, input_start.add(32000)..input_end),
]
)
);
// Try on a range that starts slightly after our owned stripe
assert_eq!(
do_fragment(input_start.add(1), input_end, &shard_identity, 131072),
(32767, vec![(32767, input_start.add(1)..input_end)])
);
}
/// Test our calculations work correctly when we start a range from the logical size key of
/// a previous relation.
#[test]
fn sharded_range_fragment_starting_from_logical_size() {
let input_start = Key::from_hex("000000067f00000001000000ae00ffffffff").unwrap();
let input_end = Key::from_hex("000000067f00000001000000ae0100008000").unwrap();
// Shard 0 owns the first stripe in the relation, and the preceding logical size is shard local too
let shard_identity = ShardIdentity::new(
ShardNumber(0),
ShardCount::new(4),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap();
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 0x10000),
(0x8001, vec![(0x8001, input_start..input_end)])
);
// Shard 1 does not own the first stripe in the relation, but it does own the logical size (all shards
// store all logical sizes)
let shard_identity = ShardIdentity::new(
ShardNumber(1),
ShardCount::new(4),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap();
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 0x10000),
(0x1, vec![(0x1, input_start..input_end)])
);
}
/// Test that ShardedRange behaves properly when used on un-sharded data
#[test]
fn sharded_range_fragment_unsharded() {
let shard_identity = ShardIdentity::unsharded();
let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
let input_end = Key::from_hex("000000067f00000001000000ae0000010000").unwrap();
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 0x8000),
(
0x10000,
vec![
(0x8000, input_start..input_start.add(0x8000)),
(0x8000, input_start.add(0x8000)..input_start.add(0x10000))
]
)
);
}
#[test]
fn sharded_range_fragment_cross_relation() {
let shard_identity = ShardIdentity::unsharded();
// A range that spans relations: expect fragmentation to give up and return a u32::MAX size
let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
let input_end = Key::from_hex("000000068f00000001000000ae0000010000").unwrap();
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 0x8000),
(u32::MAX, vec![(u32::MAX, input_start..input_end),])
);
// Same, but using a sharded identity
let shard_identity = ShardIdentity::new(
ShardNumber(0),
ShardCount::new(4),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap();
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 0x8000),
(u32::MAX, vec![(u32::MAX, input_start..input_end),])
);
}
#[test]
fn sharded_range_fragment_tiny_nblocks() {
let shard_identity = ShardIdentity::unsharded();
// A range that spans relations: expect fragmentation to give up and return a u32::MAX size
let input_start = Key::from_hex("000000067F00000001000004E10000000000").unwrap();
let input_end = Key::from_hex("000000067F00000001000004E10000000038").unwrap();
assert_eq!(
do_fragment(input_start, input_end, &shard_identity, 16),
(
0x38,
vec![
(16, input_start..input_start.add(16)),
(16, input_start.add(16)..input_start.add(32)),
(16, input_start.add(32)..input_start.add(48)),
(8, input_start.add(48)..input_end),
]
)
);
}
#[test]
fn sharded_range_fragment_fuzz() {
// Use a fixed seed: we don't want to explicitly pick values, but we do want
// the test to be reproducible.
let mut prng = rand::rngs::StdRng::seed_from_u64(0xdeadbeef);
for _i in 0..1000 {
let shard_identity = if prng.next_u32() % 2 == 0 {
ShardIdentity::unsharded()
} else {
let shard_count = prng.next_u32() % 127 + 1;
ShardIdentity::new(
ShardNumber((prng.next_u32() % shard_count) as u8),
ShardCount::new(shard_count as u8),
ShardParameters::DEFAULT_STRIPE_SIZE,
)
.unwrap()
};
let target_nblocks = prng.next_u32() % 65536 + 1;
let start_offset = prng.next_u32() % 16384;
// Try ranges up to 4GiB in size, that are always at least 1
let range_size = prng.next_u32() % 8192 + 1;
// A range that spans relations: expect fragmentation to give up and return a u32::MAX size
let input_start = Key::from_hex("000000067F00000001000004E10000000000")
.unwrap()
.add(start_offset);
let input_end = input_start.add(range_size);
// This test's main success conditions are the invariants baked into do_fragment
let (_total_size, fragments) =
do_fragment(input_start, input_end, &shard_identity, target_nblocks);
// Pick a random key within the range and check it appears in the output
let example_key = input_start.add(prng.next_u32() % range_size);
// Panic on unwrap if it isn't found
let example_key_frag = fragments
.iter()
.find(|f| f.1.contains(&example_key))
.unwrap();
// Check that the fragment containing our random key has a nonzero size if
// that key is shard-local
let example_key_local = !shard_identity.is_key_disposable(&example_key);
if example_key_local {
assert!(example_key_frag.0 > 0);
}
}
}
} }

View File

@@ -20,7 +20,6 @@ use utils::{
history_buffer::HistoryBufferWithDropCounter, history_buffer::HistoryBufferWithDropCounter,
id::{NodeId, TenantId, TimelineId}, id::{NodeId, TenantId, TimelineId},
lsn::Lsn, lsn::Lsn,
serde_system_time,
}; };
use crate::controller_api::PlacementPolicy; use crate::controller_api::PlacementPolicy;
@@ -302,8 +301,6 @@ pub struct TenantConfig {
pub heatmap_period: Option<String>, pub heatmap_period: Option<String>,
pub lazy_slru_download: Option<bool>, pub lazy_slru_download: Option<bool>,
pub timeline_get_throttle: Option<ThrottleConfig>, pub timeline_get_throttle: Option<ThrottleConfig>,
pub image_layer_creation_check_threshold: Option<u8>,
pub switch_to_aux_file_v2: Option<bool>,
} }
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
@@ -430,6 +427,7 @@ pub struct StatusResponse {
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
#[serde(deny_unknown_fields)] #[serde(deny_unknown_fields)]
pub struct TenantLocationConfigRequest { pub struct TenantLocationConfigRequest {
pub tenant_id: Option<TenantShardId>,
#[serde(flatten)] #[serde(flatten)]
pub config: LocationConfig, // as we have a flattened field, we should reject all unknown fields in it pub config: LocationConfig, // as we have a flattened field, we should reject all unknown fields in it
} }
@@ -747,18 +745,10 @@ pub struct TimelineGcRequest {
pub gc_horizon: Option<u64>, pub gc_horizon: Option<u64>,
} }
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WalRedoManagerProcessStatus {
pub pid: u32,
/// The strum-generated `into::<&'static str>()` for `pageserver::walredo::ProcessKind`.
/// `ProcessKind` are a transitory thing, so, they have no enum representation in `pageserver_api`.
pub kind: Cow<'static, str>,
}
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WalRedoManagerStatus { pub struct WalRedoManagerStatus {
pub last_redo_at: Option<chrono::DateTime<chrono::Utc>>, pub last_redo_at: Option<chrono::DateTime<chrono::Utc>>,
pub process: Option<WalRedoManagerProcessStatus>, pub pid: Option<u32>,
} }
/// The progress of a secondary tenant is mostly useful when doing a long running download: e.g. initiating /// The progress of a secondary tenant is mostly useful when doing a long running download: e.g. initiating
@@ -767,7 +757,11 @@ pub struct WalRedoManagerStatus {
#[derive(Default, Debug, Serialize, Deserialize, Clone)] #[derive(Default, Debug, Serialize, Deserialize, Clone)]
pub struct SecondaryProgress { pub struct SecondaryProgress {
/// The remote storage LastModified time of the heatmap object we last downloaded. /// The remote storage LastModified time of the heatmap object we last downloaded.
pub heatmap_mtime: Option<serde_system_time::SystemTime>, #[serde(
serialize_with = "opt_ser_rfc3339_millis",
deserialize_with = "opt_deser_rfc3339_millis"
)]
pub heatmap_mtime: Option<SystemTime>,
/// The number of layers currently on-disk /// The number of layers currently on-disk
pub layers_downloaded: usize, pub layers_downloaded: usize,
@@ -780,15 +774,27 @@ pub struct SecondaryProgress {
pub bytes_total: u64, pub bytes_total: u64,
} }
#[derive(Serialize, Deserialize, Debug)] fn opt_ser_rfc3339_millis<S: serde::Serializer>(
pub struct TenantScanRemoteStorageShard { ts: &Option<SystemTime>,
pub tenant_shard_id: TenantShardId, serializer: S,
pub generation: Option<u32>, ) -> Result<S::Ok, S::Error> {
match ts {
Some(ts) => serializer.collect_str(&humantime::format_rfc3339_millis(*ts)),
None => serializer.serialize_none(),
}
} }
#[derive(Serialize, Deserialize, Debug, Default)] fn opt_deser_rfc3339_millis<'de, D>(deserializer: D) -> Result<Option<SystemTime>, D::Error>
pub struct TenantScanRemoteStorageResponse { where
pub shards: Vec<TenantScanRemoteStorageShard>, D: serde::de::Deserializer<'de>,
{
let s: Option<String> = serde::de::Deserialize::deserialize(deserializer)?;
match s {
None => Ok(None),
Some(s) => humantime::parse_rfc3339(&s)
.map_err(serde::de::Error::custom)
.map(Some),
}
} }
pub mod virtual_file { pub mod virtual_file {
@@ -858,72 +864,39 @@ impl TryFrom<u8> for PagestreamBeMessageTag {
} }
} }
// In the V2 protocol version, a GetPage request contains two LSN values:
//
// request_lsn: Get the page version at this point in time. Lsn::Max is a special value that means
// "get the latest version present". It's used by the primary server, which knows that no one else
// is writing WAL. 'not_modified_since' must be set to a proper value even if request_lsn is
// Lsn::Max. Standby servers use the current replay LSN as the request LSN.
//
// not_modified_since: Hint to the pageserver that the client knows that the page has not been
// modified between 'not_modified_since' and the request LSN. It's always correct to set
// 'not_modified_since equal' to 'request_lsn' (unless Lsn::Max is used as the 'request_lsn'), but
// passing an earlier LSN can speed up the request, by allowing the pageserver to process the
// request without waiting for 'request_lsn' to arrive.
//
// The legacy V1 interface contained only one LSN, and a boolean 'latest' flag. The V1 interface was
// sufficient for the primary; the 'lsn' was equivalent to the 'not_modified_since' value, and
// 'latest' was set to true. The V2 interface was added because there was no correct way for a
// standby to request a page at a particular non-latest LSN, and also include the
// 'not_modified_since' hint. That led to an awkward choice of either using an old LSN in the
// request, if the standby knows that the page hasn't been modified since, and risk getting an error
// if that LSN has fallen behind the GC horizon, or requesting the current replay LSN, which could
// require the pageserver unnecessarily to wait for the WAL to arrive up to that point. The new V2
// interface allows sending both LSNs, and let the pageserver do the right thing. There is no
// difference in the responses between V1 and V2.
//
// The Request structs below reflect the V2 interface. If V1 is used, the parse function
// maps the old format requests to the new format.
//
#[derive(Clone, Copy)]
pub enum PagestreamProtocolVersion {
V1,
V2,
}
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub struct PagestreamExistsRequest { pub struct PagestreamExistsRequest {
pub request_lsn: Lsn, pub latest: bool,
pub not_modified_since: Lsn, pub lsn: Lsn,
pub rel: RelTag, pub rel: RelTag,
} }
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub struct PagestreamNblocksRequest { pub struct PagestreamNblocksRequest {
pub request_lsn: Lsn, pub latest: bool,
pub not_modified_since: Lsn, pub lsn: Lsn,
pub rel: RelTag, pub rel: RelTag,
} }
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub struct PagestreamGetPageRequest { pub struct PagestreamGetPageRequest {
pub request_lsn: Lsn, pub latest: bool,
pub not_modified_since: Lsn, pub lsn: Lsn,
pub rel: RelTag, pub rel: RelTag,
pub blkno: u32, pub blkno: u32,
} }
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub struct PagestreamDbSizeRequest { pub struct PagestreamDbSizeRequest {
pub request_lsn: Lsn, pub latest: bool,
pub not_modified_since: Lsn, pub lsn: Lsn,
pub dbnode: u32, pub dbnode: u32,
} }
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub struct PagestreamGetSlruSegmentRequest { pub struct PagestreamGetSlruSegmentRequest {
pub request_lsn: Lsn, pub latest: bool,
pub not_modified_since: Lsn, pub lsn: Lsn,
pub kind: u8, pub kind: u8,
pub segno: u32, pub segno: u32,
} }
@@ -970,16 +943,14 @@ pub struct TenantHistorySize {
} }
impl PagestreamFeMessage { impl PagestreamFeMessage {
/// Serialize a compute -> pageserver message. This is currently only used in testing
/// tools. Always uses protocol version 2.
pub fn serialize(&self) -> Bytes { pub fn serialize(&self) -> Bytes {
let mut bytes = BytesMut::new(); let mut bytes = BytesMut::new();
match self { match self {
Self::Exists(req) => { Self::Exists(req) => {
bytes.put_u8(0); bytes.put_u8(0);
bytes.put_u64(req.request_lsn.0); bytes.put_u8(u8::from(req.latest));
bytes.put_u64(req.not_modified_since.0); bytes.put_u64(req.lsn.0);
bytes.put_u32(req.rel.spcnode); bytes.put_u32(req.rel.spcnode);
bytes.put_u32(req.rel.dbnode); bytes.put_u32(req.rel.dbnode);
bytes.put_u32(req.rel.relnode); bytes.put_u32(req.rel.relnode);
@@ -988,8 +959,8 @@ impl PagestreamFeMessage {
Self::Nblocks(req) => { Self::Nblocks(req) => {
bytes.put_u8(1); bytes.put_u8(1);
bytes.put_u64(req.request_lsn.0); bytes.put_u8(u8::from(req.latest));
bytes.put_u64(req.not_modified_since.0); bytes.put_u64(req.lsn.0);
bytes.put_u32(req.rel.spcnode); bytes.put_u32(req.rel.spcnode);
bytes.put_u32(req.rel.dbnode); bytes.put_u32(req.rel.dbnode);
bytes.put_u32(req.rel.relnode); bytes.put_u32(req.rel.relnode);
@@ -998,8 +969,8 @@ impl PagestreamFeMessage {
Self::GetPage(req) => { Self::GetPage(req) => {
bytes.put_u8(2); bytes.put_u8(2);
bytes.put_u64(req.request_lsn.0); bytes.put_u8(u8::from(req.latest));
bytes.put_u64(req.not_modified_since.0); bytes.put_u64(req.lsn.0);
bytes.put_u32(req.rel.spcnode); bytes.put_u32(req.rel.spcnode);
bytes.put_u32(req.rel.dbnode); bytes.put_u32(req.rel.dbnode);
bytes.put_u32(req.rel.relnode); bytes.put_u32(req.rel.relnode);
@@ -1009,15 +980,15 @@ impl PagestreamFeMessage {
Self::DbSize(req) => { Self::DbSize(req) => {
bytes.put_u8(3); bytes.put_u8(3);
bytes.put_u64(req.request_lsn.0); bytes.put_u8(u8::from(req.latest));
bytes.put_u64(req.not_modified_since.0); bytes.put_u64(req.lsn.0);
bytes.put_u32(req.dbnode); bytes.put_u32(req.dbnode);
} }
Self::GetSlruSegment(req) => { Self::GetSlruSegment(req) => {
bytes.put_u8(4); bytes.put_u8(4);
bytes.put_u64(req.request_lsn.0); bytes.put_u8(u8::from(req.latest));
bytes.put_u64(req.not_modified_since.0); bytes.put_u64(req.lsn.0);
bytes.put_u8(req.kind); bytes.put_u8(req.kind);
bytes.put_u32(req.segno); bytes.put_u32(req.segno);
} }
@@ -1026,40 +997,18 @@ impl PagestreamFeMessage {
bytes.into() bytes.into()
} }
pub fn parse<R: std::io::Read>( pub fn parse<R: std::io::Read>(body: &mut R) -> anyhow::Result<PagestreamFeMessage> {
body: &mut R, // TODO these gets can fail
protocol_version: PagestreamProtocolVersion,
) -> anyhow::Result<PagestreamFeMessage> {
// these correspond to the NeonMessageTag enum in pagestore_client.h // these correspond to the NeonMessageTag enum in pagestore_client.h
// //
// TODO: consider using protobuf or serde bincode for less error prone // TODO: consider using protobuf or serde bincode for less error prone
// serialization. // serialization.
let msg_tag = body.read_u8()?; let msg_tag = body.read_u8()?;
let (request_lsn, not_modified_since) = match protocol_version {
PagestreamProtocolVersion::V2 => (
Lsn::from(body.read_u64::<BigEndian>()?),
Lsn::from(body.read_u64::<BigEndian>()?),
),
PagestreamProtocolVersion::V1 => {
// In the old protocol, each message starts with a boolean 'latest' flag,
// followed by 'lsn'. Convert that to the two LSNs, 'request_lsn' and
// 'not_modified_since', used in the new protocol version.
let latest = body.read_u8()? != 0;
let request_lsn = Lsn::from(body.read_u64::<BigEndian>()?);
if latest {
(Lsn::MAX, request_lsn) // get latest version
} else {
(request_lsn, request_lsn) // get version at specified LSN
}
}
};
// The rest of the messages are the same between V1 and V2
match msg_tag { match msg_tag {
0 => Ok(PagestreamFeMessage::Exists(PagestreamExistsRequest { 0 => Ok(PagestreamFeMessage::Exists(PagestreamExistsRequest {
request_lsn, latest: body.read_u8()? != 0,
not_modified_since, lsn: Lsn::from(body.read_u64::<BigEndian>()?),
rel: RelTag { rel: RelTag {
spcnode: body.read_u32::<BigEndian>()?, spcnode: body.read_u32::<BigEndian>()?,
dbnode: body.read_u32::<BigEndian>()?, dbnode: body.read_u32::<BigEndian>()?,
@@ -1068,8 +1017,8 @@ impl PagestreamFeMessage {
}, },
})), })),
1 => Ok(PagestreamFeMessage::Nblocks(PagestreamNblocksRequest { 1 => Ok(PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
request_lsn, latest: body.read_u8()? != 0,
not_modified_since, lsn: Lsn::from(body.read_u64::<BigEndian>()?),
rel: RelTag { rel: RelTag {
spcnode: body.read_u32::<BigEndian>()?, spcnode: body.read_u32::<BigEndian>()?,
dbnode: body.read_u32::<BigEndian>()?, dbnode: body.read_u32::<BigEndian>()?,
@@ -1078,8 +1027,8 @@ impl PagestreamFeMessage {
}, },
})), })),
2 => Ok(PagestreamFeMessage::GetPage(PagestreamGetPageRequest { 2 => Ok(PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
request_lsn, latest: body.read_u8()? != 0,
not_modified_since, lsn: Lsn::from(body.read_u64::<BigEndian>()?),
rel: RelTag { rel: RelTag {
spcnode: body.read_u32::<BigEndian>()?, spcnode: body.read_u32::<BigEndian>()?,
dbnode: body.read_u32::<BigEndian>()?, dbnode: body.read_u32::<BigEndian>()?,
@@ -1089,14 +1038,14 @@ impl PagestreamFeMessage {
blkno: body.read_u32::<BigEndian>()?, blkno: body.read_u32::<BigEndian>()?,
})), })),
3 => Ok(PagestreamFeMessage::DbSize(PagestreamDbSizeRequest { 3 => Ok(PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
request_lsn, latest: body.read_u8()? != 0,
not_modified_since, lsn: Lsn::from(body.read_u64::<BigEndian>()?),
dbnode: body.read_u32::<BigEndian>()?, dbnode: body.read_u32::<BigEndian>()?,
})), })),
4 => Ok(PagestreamFeMessage::GetSlruSegment( 4 => Ok(PagestreamFeMessage::GetSlruSegment(
PagestreamGetSlruSegmentRequest { PagestreamGetSlruSegmentRequest {
request_lsn, latest: body.read_u8()? != 0,
not_modified_since, lsn: Lsn::from(body.read_u64::<BigEndian>()?),
kind: body.read_u8()?, kind: body.read_u8()?,
segno: body.read_u32::<BigEndian>()?, segno: body.read_u32::<BigEndian>()?,
}, },
@@ -1224,8 +1173,8 @@ mod tests {
// Test serialization/deserialization of PagestreamFeMessage // Test serialization/deserialization of PagestreamFeMessage
let messages = vec![ let messages = vec![
PagestreamFeMessage::Exists(PagestreamExistsRequest { PagestreamFeMessage::Exists(PagestreamExistsRequest {
request_lsn: Lsn(4), latest: true,
not_modified_since: Lsn(3), lsn: Lsn(4),
rel: RelTag { rel: RelTag {
forknum: 1, forknum: 1,
spcnode: 2, spcnode: 2,
@@ -1234,8 +1183,8 @@ mod tests {
}, },
}), }),
PagestreamFeMessage::Nblocks(PagestreamNblocksRequest { PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
request_lsn: Lsn(4), latest: false,
not_modified_since: Lsn(4), lsn: Lsn(4),
rel: RelTag { rel: RelTag {
forknum: 1, forknum: 1,
spcnode: 2, spcnode: 2,
@@ -1244,8 +1193,8 @@ mod tests {
}, },
}), }),
PagestreamFeMessage::GetPage(PagestreamGetPageRequest { PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
request_lsn: Lsn(4), latest: true,
not_modified_since: Lsn(3), lsn: Lsn(4),
rel: RelTag { rel: RelTag {
forknum: 1, forknum: 1,
spcnode: 2, spcnode: 2,
@@ -1255,16 +1204,14 @@ mod tests {
blkno: 7, blkno: 7,
}), }),
PagestreamFeMessage::DbSize(PagestreamDbSizeRequest { PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
request_lsn: Lsn(4), latest: true,
not_modified_since: Lsn(3), lsn: Lsn(4),
dbnode: 7, dbnode: 7,
}), }),
]; ];
for msg in messages { for msg in messages {
let bytes = msg.serialize(); let bytes = msg.serialize();
let reconstructed = let reconstructed = PagestreamFeMessage::parse(&mut bytes.reader()).unwrap();
PagestreamFeMessage::parse(&mut bytes.reader(), PagestreamProtocolVersion::V2)
.unwrap();
assert!(msg == reconstructed); assert!(msg == reconstructed);
} }
} }

View File

@@ -1,11 +1,9 @@
use utils::lsn::Lsn; use utils::lsn::Lsn;
use crate::keyspace::SparseKeySpace;
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub struct Partitioning { pub struct Partitioning {
pub keys: crate::keyspace::KeySpace, pub keys: crate::keyspace::KeySpace,
pub sparse_keys: crate::keyspace::SparseKeySpace,
pub at_lsn: Lsn, pub at_lsn: Lsn,
} }
@@ -34,8 +32,6 @@ impl serde::Serialize for Partitioning {
let mut map = serializer.serialize_map(Some(2))?; let mut map = serializer.serialize_map(Some(2))?;
map.serialize_key("keys")?; map.serialize_key("keys")?;
map.serialize_value(&KeySpace(&self.keys))?; map.serialize_value(&KeySpace(&self.keys))?;
map.serialize_key("sparse_keys")?;
map.serialize_value(&KeySpace(&self.sparse_keys.0))?;
map.serialize_key("at_lsn")?; map.serialize_key("at_lsn")?;
map.serialize_value(&WithDisplay(&self.at_lsn))?; map.serialize_value(&WithDisplay(&self.at_lsn))?;
map.end() map.end()
@@ -103,7 +99,6 @@ impl<'a> serde::Deserialize<'a> for Partitioning {
#[derive(serde::Deserialize)] #[derive(serde::Deserialize)]
struct De { struct De {
keys: KeySpace, keys: KeySpace,
sparse_keys: KeySpace,
#[serde_as(as = "serde_with::DisplayFromStr")] #[serde_as(as = "serde_with::DisplayFromStr")]
at_lsn: Lsn, at_lsn: Lsn,
} }
@@ -112,7 +107,6 @@ impl<'a> serde::Deserialize<'a> for Partitioning {
Ok(Self { Ok(Self {
at_lsn: de.at_lsn, at_lsn: de.at_lsn,
keys: de.keys.0, keys: de.keys.0,
sparse_keys: SparseKeySpace(de.sparse_keys.0),
}) })
} }
} }
@@ -139,12 +133,6 @@ mod tests {
"030000000000000000000000000000000003" "030000000000000000000000000000000003"
] ]
], ],
"sparse_keys": [
[
"620000000000000000000000000000000000",
"620000000000000000000000000000000003"
]
],
"at_lsn": "0/2240160" "at_lsn": "0/2240160"
} }
"#; "#;

View File

@@ -1,4 +1,4 @@
use utils::serde_system_time::SystemTime; use std::time::SystemTime;
/// Pageserver current utilization and scoring for how good candidate the pageserver would be for /// Pageserver current utilization and scoring for how good candidate the pageserver would be for
/// the next tenant. /// the next tenant.
@@ -21,9 +21,28 @@ pub struct PageserverUtilization {
/// When was this snapshot captured, pageserver local time. /// When was this snapshot captured, pageserver local time.
/// ///
/// Use millis to give confidence that the value is regenerated often enough. /// Use millis to give confidence that the value is regenerated often enough.
#[serde(
serialize_with = "ser_rfc3339_millis",
deserialize_with = "deser_rfc3339_millis"
)]
pub captured_at: SystemTime, pub captured_at: SystemTime,
} }
fn ser_rfc3339_millis<S: serde::Serializer>(
ts: &SystemTime,
serializer: S,
) -> Result<S::Ok, S::Error> {
serializer.collect_str(&humantime::format_rfc3339_millis(*ts))
}
fn deser_rfc3339_millis<'de, D>(deserializer: D) -> Result<SystemTime, D::Error>
where
D: serde::de::Deserializer<'de>,
{
let s: String = serde::de::Deserialize::deserialize(deserializer)?;
humantime::parse_rfc3339(&s).map_err(serde::de::Error::custom)
}
/// openapi knows only `format: int64`, so avoid outputting a non-parseable value by generated clients. /// openapi knows only `format: int64`, so avoid outputting a non-parseable value by generated clients.
/// ///
/// Instead of newtype, use this because a newtype would get require handling deserializing values /// Instead of newtype, use this because a newtype would get require handling deserializing values
@@ -50,9 +69,7 @@ mod tests {
disk_usage_bytes: u64::MAX, disk_usage_bytes: u64::MAX,
free_space_bytes: 0, free_space_bytes: 0,
utilization_score: u64::MAX, utilization_score: u64::MAX,
captured_at: SystemTime( captured_at: SystemTime::UNIX_EPOCH + Duration::from_secs(1708509779),
std::time::SystemTime::UNIX_EPOCH + Duration::from_secs(1708509779),
),
}; };
let s = serde_json::to_string(&doc).unwrap(); let s = serde_json::to_string(&doc).unwrap();

View File

@@ -5,93 +5,15 @@ use crate::{
models::ShardParameters, models::ShardParameters,
}; };
use hex::FromHex; use hex::FromHex;
use postgres_ffi::relfile_utils::INIT_FORKNUM;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use utils::id::TenantId; use utils::id::TenantId;
/// See docs/rfcs/031-sharding-static.md for an overview of sharding.
///
/// This module contains a variety of types used to represent the concept of sharding
/// a Neon tenant across multiple physical shards. Since there are quite a few of these,
/// we provide an summary here.
///
/// Types used to describe shards:
/// - [`ShardCount`] describes how many shards make up a tenant, plus the magic `unsharded` value
/// which identifies a tenant which is not shard-aware. This means its storage paths do not include
/// a shard suffix.
/// - [`ShardNumber`] is simply the zero-based index of a shard within a tenant.
/// - [`ShardIndex`] is the 2-tuple of `ShardCount` and `ShardNumber`, it's just like a `TenantShardId`
/// without the tenant ID. This is useful for things that are implicitly scoped to a particular
/// tenant, such as layer files.
/// - [`ShardIdentity`]` is the full description of a particular shard's parameters, in sufficient
/// detail to convert a [`Key`] to a [`ShardNumber`] when deciding where to write/read.
/// - The [`ShardSlug`] is a terse formatter for ShardCount and ShardNumber, written as
/// four hex digits. An unsharded tenant is `0000`.
/// - [`TenantShardId`] is the unique ID of a particular shard within a particular tenant
///
/// Types used to describe the parameters for data distribution in a sharded tenant:
/// - [`ShardStripeSize`] controls how long contiguous runs of [`Key`]s (stripes) are when distributed across
/// multiple shards. Its value is given in 8kiB pages.
/// - [`ShardLayout`] describes the data distribution scheme, and at time of writing is
/// always zero: this is provided for future upgrades that might introduce different
/// data distribution schemes.
///
/// Examples:
/// - A legacy unsharded tenant has one shard with ShardCount(0), ShardNumber(0), and its slug is 0000
/// - A single sharded tenant has one shard with ShardCount(1), ShardNumber(0), and its slug is 0001
/// - In a tenant with 4 shards, each shard has ShardCount(N), ShardNumber(i) where i in 0..N-1 (inclusive),
/// and their slugs are 0004, 0104, 0204, and 0304.
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)] #[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)]
pub struct ShardNumber(pub u8); pub struct ShardNumber(pub u8);
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)] #[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)]
pub struct ShardCount(u8); pub struct ShardCount(u8);
/// Combination of ShardNumber and ShardCount. For use within the context of a particular tenant,
/// when we need to know which shard we're dealing with, but do not need to know the full
/// ShardIdentity (because we won't be doing any page->shard mapping), and do not need to know
/// the fully qualified TenantShardId.
#[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)]
pub struct ShardIndex {
pub shard_number: ShardNumber,
pub shard_count: ShardCount,
}
/// The ShardIdentity contains enough information to map a [`Key`] to a [`ShardNumber`],
/// and to check whether that [`ShardNumber`] is the same as the current shard.
#[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)]
pub struct ShardIdentity {
pub number: ShardNumber,
pub count: ShardCount,
pub stripe_size: ShardStripeSize,
layout: ShardLayout,
}
/// Formatting helper, for generating the `shard_id` label in traces.
struct ShardSlug<'a>(&'a TenantShardId);
/// TenantShardId globally identifies a particular shard in a particular tenant.
///
/// These are written as `<TenantId>-<ShardSlug>`, for example:
/// # The second shard in a two-shard tenant
/// 072f1291a5310026820b2fe4b2968934-0102
///
/// If the `ShardCount` is _unsharded_, the `TenantShardId` is written without
/// a shard suffix and is equivalent to the encoding of a `TenantId`: this enables
/// an unsharded [`TenantShardId`] to be used interchangably with a [`TenantId`].
///
/// The human-readable encoding of an unsharded TenantShardId, such as used in API URLs,
/// is both forward and backward compatible with TenantId: a legacy TenantId can be
/// decoded as a TenantShardId, and when re-encoded it will be parseable
/// as a TenantId.
#[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)]
pub struct TenantShardId {
pub tenant_id: TenantId,
pub shard_number: ShardNumber,
pub shard_count: ShardCount,
}
impl ShardCount { impl ShardCount {
pub const MAX: Self = Self(u8::MAX); pub const MAX: Self = Self(u8::MAX);
@@ -116,7 +38,6 @@ impl ShardCount {
self.0 self.0
} }
///
pub fn is_unsharded(&self) -> bool { pub fn is_unsharded(&self) -> bool {
self.0 == 0 self.0 == 0
} }
@@ -132,6 +53,33 @@ impl ShardNumber {
pub const MAX: Self = Self(u8::MAX); pub const MAX: Self = Self(u8::MAX);
} }
/// TenantShardId identify the units of work for the Pageserver.
///
/// These are written as `<tenant_id>-<shard number><shard-count>`, for example:
///
/// # The second shard in a two-shard tenant
/// 072f1291a5310026820b2fe4b2968934-0102
///
/// Historically, tenants could not have multiple shards, and were identified
/// by TenantId. To support this, TenantShardId has a special legacy
/// mode where `shard_count` is equal to zero: this represents a single-sharded
/// tenant which should be written as a TenantId with no suffix.
///
/// The human-readable encoding of TenantShardId, such as used in API URLs,
/// is both forward and backward compatible: a legacy TenantId can be
/// decoded as a TenantShardId, and when re-encoded it will be parseable
/// as a TenantId.
///
/// Note that the binary encoding is _not_ backward compatible, because
/// at the time sharding is introduced, there are no existing binary structures
/// containing TenantId that we need to handle.
#[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)]
pub struct TenantShardId {
pub tenant_id: TenantId,
pub shard_number: ShardNumber,
pub shard_count: ShardCount,
}
impl TenantShardId { impl TenantShardId {
pub fn unsharded(tenant_id: TenantId) -> Self { pub fn unsharded(tenant_id: TenantId) -> Self {
Self { Self {
@@ -163,13 +111,10 @@ impl TenantShardId {
} }
/// Convenience for code that has special behavior on the 0th shard. /// Convenience for code that has special behavior on the 0th shard.
pub fn is_shard_zero(&self) -> bool { pub fn is_zero(&self) -> bool {
self.shard_number == ShardNumber(0) self.shard_number == ShardNumber(0)
} }
/// The "unsharded" value is distinct from simply having a single shard: it represents
/// a tenant which is not shard-aware at all, and whose storage paths will not include
/// a shard suffix.
pub fn is_unsharded(&self) -> bool { pub fn is_unsharded(&self) -> bool {
self.shard_number == ShardNumber(0) && self.shard_count.is_unsharded() self.shard_number == ShardNumber(0) && self.shard_count.is_unsharded()
} }
@@ -205,6 +150,9 @@ impl TenantShardId {
} }
} }
/// Formatting helper
struct ShardSlug<'a>(&'a TenantShardId);
impl<'a> std::fmt::Display for ShardSlug<'a> { impl<'a> std::fmt::Display for ShardSlug<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!( write!(
@@ -274,6 +222,16 @@ impl From<[u8; 18]> for TenantShardId {
} }
} }
/// For use within the context of a particular tenant, when we need to know which
/// shard we're dealing with, but do not need to know the full ShardIdentity (because
/// we won't be doing any page->shard mapping), and do not need to know the fully qualified
/// TenantShardId.
#[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)]
pub struct ShardIndex {
pub shard_number: ShardNumber,
pub shard_count: ShardCount,
}
impl ShardIndex { impl ShardIndex {
pub fn new(number: ShardNumber, count: ShardCount) -> Self { pub fn new(number: ShardNumber, count: ShardCount) -> Self {
Self { Self {
@@ -288,9 +246,6 @@ impl ShardIndex {
} }
} }
/// The "unsharded" value is distinct from simply having a single shard: it represents
/// a tenant which is not shard-aware at all, and whose storage paths will not include
/// a shard suffix.
pub fn is_unsharded(&self) -> bool { pub fn is_unsharded(&self) -> bool {
self.shard_number == ShardNumber(0) && self.shard_count == ShardCount(0) self.shard_number == ShardNumber(0) && self.shard_count == ShardCount(0)
} }
@@ -358,8 +313,6 @@ impl Serialize for TenantShardId {
if serializer.is_human_readable() { if serializer.is_human_readable() {
serializer.collect_str(self) serializer.collect_str(self)
} else { } else {
// Note: while human encoding of [`TenantShardId`] is backward and forward
// compatible, this binary encoding is not.
let mut packed: [u8; 18] = [0; 18]; let mut packed: [u8; 18] = [0; 18];
packed[0..16].clone_from_slice(&self.tenant_id.as_arr()); packed[0..16].clone_from_slice(&self.tenant_id.as_arr());
packed[16] = self.shard_number.0; packed[16] = self.shard_number.0;
@@ -437,6 +390,16 @@ const LAYOUT_BROKEN: ShardLayout = ShardLayout(255);
/// Default stripe size in pages: 256MiB divided by 8kiB page size. /// Default stripe size in pages: 256MiB divided by 8kiB page size.
const DEFAULT_STRIPE_SIZE: ShardStripeSize = ShardStripeSize(256 * 1024 / 8); const DEFAULT_STRIPE_SIZE: ShardStripeSize = ShardStripeSize(256 * 1024 / 8);
/// The ShardIdentity contains the information needed for one member of map
/// to resolve a key to a shard, and then check whether that shard is ==self.
#[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)]
pub struct ShardIdentity {
pub number: ShardNumber,
pub count: ShardCount,
pub stripe_size: ShardStripeSize,
layout: ShardLayout,
}
#[derive(thiserror::Error, Debug, PartialEq, Eq)] #[derive(thiserror::Error, Debug, PartialEq, Eq)]
pub enum ShardConfigError { pub enum ShardConfigError {
#[error("Invalid shard count")] #[error("Invalid shard count")]
@@ -451,7 +414,7 @@ impl ShardIdentity {
/// An identity with number=0 count=0 is a "none" identity, which represents legacy /// An identity with number=0 count=0 is a "none" identity, which represents legacy
/// tenants. Modern single-shard tenants should not use this: they should /// tenants. Modern single-shard tenants should not use this: they should
/// have number=0 count=1. /// have number=0 count=1.
pub const fn unsharded() -> Self { pub fn unsharded() -> Self {
Self { Self {
number: ShardNumber(0), number: ShardNumber(0),
count: ShardCount(0), count: ShardCount(0),
@@ -476,9 +439,6 @@ impl ShardIdentity {
} }
} }
/// The "unsharded" value is distinct from simply having a single shard: it represents
/// a tenant which is not shard-aware at all, and whose storage paths will not include
/// a shard suffix.
pub fn is_unsharded(&self) -> bool { pub fn is_unsharded(&self) -> bool {
self.number == ShardNumber(0) && self.count == ShardCount(0) self.number == ShardNumber(0) && self.count == ShardCount(0)
} }
@@ -527,8 +487,6 @@ impl ShardIdentity {
} }
/// Return true if the key should be ingested by this shard /// Return true if the key should be ingested by this shard
///
/// Shards must ingest _at least_ keys which return true from this check.
pub fn is_key_local(&self, key: &Key) -> bool { pub fn is_key_local(&self, key: &Key) -> bool {
assert!(!self.is_broken()); assert!(!self.is_broken());
if self.count < ShardCount(2) || (key_is_shard0(key) && self.number == ShardNumber(0)) { if self.count < ShardCount(2) || (key_is_shard0(key) && self.number == ShardNumber(0)) {
@@ -539,9 +497,7 @@ impl ShardIdentity {
} }
/// Return true if the key should be discarded if found in this shard's /// Return true if the key should be discarded if found in this shard's
/// data store, e.g. during compaction after a split. /// data store, e.g. during compaction after a split
///
/// Shards _may_ drop keys which return false here, but are not obliged to.
pub fn is_key_disposable(&self, key: &Key) -> bool { pub fn is_key_disposable(&self, key: &Key) -> bool {
if key_is_shard0(key) { if key_is_shard0(key) {
// Q: Why can't we dispose of shard0 content if we're not shard 0? // Q: Why can't we dispose of shard0 content if we're not shard 0?
@@ -567,7 +523,7 @@ impl ShardIdentity {
/// Convenience for checking if this identity is the 0th shard in a tenant, /// Convenience for checking if this identity is the 0th shard in a tenant,
/// for special cases on shard 0 such as ingesting relation sizes. /// for special cases on shard 0 such as ingesting relation sizes.
pub fn is_shard_zero(&self) -> bool { pub fn is_zero(&self) -> bool {
self.number == ShardNumber(0) self.number == ShardNumber(0)
} }
} }
@@ -650,13 +606,7 @@ fn key_is_shard0(key: &Key) -> bool {
// relation pages are distributed to shards other than shard zero. Everything else gets // relation pages are distributed to shards other than shard zero. Everything else gets
// stored on shard 0. This guarantees that shard 0 can independently serve basebackup // stored on shard 0. This guarantees that shard 0 can independently serve basebackup
// requests, and any request other than those for particular blocks in relations. // requests, and any request other than those for particular blocks in relations.
// !is_rel_block_key(key)
// The only exception to this rule is "initfork" data -- this relates to postgres's UNLOGGED table
// type. These are special relations, usually with only 0 or 1 blocks, and we store them on shard 0
// because they must be included in basebackups.
let is_initfork = key.field5 == INIT_FORKNUM;
!is_rel_block_key(key) || is_initfork
} }
/// Provide the same result as the function in postgres `hashfn.h` with the same name /// Provide the same result as the function in postgres `hashfn.h` with the same name

View File

@@ -118,9 +118,7 @@ pub use v14::bindings::{TimeLineID, TimestampTz, XLogRecPtr, XLogSegNo};
// Likewise for these, although the assumption that these don't change is a little more iffy. // Likewise for these, although the assumption that these don't change is a little more iffy.
pub use v14::bindings::{MultiXactOffset, MultiXactStatus}; pub use v14::bindings::{MultiXactOffset, MultiXactStatus};
pub use v14::bindings::{PageHeaderData, XLogRecord}; pub use v14::bindings::{PageHeaderData, XLogRecord};
pub use v14::xlog_utils::{ pub use v14::xlog_utils::{XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD};
XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD,
};
pub use v14::bindings::{CheckPoint, ControlFileData}; pub use v14::bindings::{CheckPoint, ControlFileData};

View File

@@ -4,9 +4,7 @@ use log::*;
use postgres::types::PgLsn; use postgres::types::PgLsn;
use postgres::Client; use postgres::Client;
use postgres_ffi::{WAL_SEGMENT_SIZE, XLOG_BLCKSZ}; use postgres_ffi::{WAL_SEGMENT_SIZE, XLOG_BLCKSZ};
use postgres_ffi::{ use postgres_ffi::{XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD};
XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD,
};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process::Command; use std::process::Command;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
@@ -264,21 +262,11 @@ fn craft_internal<C: postgres::GenericClient>(
intermediate_lsns.insert(0, initial_lsn); intermediate_lsns.insert(0, initial_lsn);
} }
// Some records may be not flushed, e.g. non-transactional logical messages. Flush now. // Some records may be not flushed, e.g. non-transactional logical messages.
// //
// If the previous WAL record ended exactly at page boundary, pg_current_wal_insert_lsn // Note: this is broken if pg_current_wal_insert_lsn is at page boundary
// returns the position just after the page header on the next page. That's where the next // because pg_current_wal_insert_lsn skips page headers.
// record will be inserted. But the page header hasn't actually been written to the WAL client.execute("select neon_xlogflush(pg_current_wal_insert_lsn())", &[])?;
// yet, and if you try to flush it, you get a "request to flush past end of generated WAL"
// error. Because of that, if the insert location is just after a page header, back off to
// previous page boundary.
let mut lsn = u64::from(client.pg_current_wal_insert_lsn()?);
if lsn % WAL_SEGMENT_SIZE as u64 == XLOG_SIZE_OF_XLOG_LONG_PHD as u64 {
lsn -= XLOG_SIZE_OF_XLOG_LONG_PHD as u64;
} else if lsn % XLOG_BLCKSZ as u64 == XLOG_SIZE_OF_XLOG_SHORT_PHD as u64 {
lsn -= XLOG_SIZE_OF_XLOG_SHORT_PHD as u64;
}
client.execute("select neon_xlogflush($1)", &[&PgLsn::from(lsn)])?;
Ok(intermediate_lsns) Ok(intermediate_lsns)
} }
@@ -332,49 +320,38 @@ impl Crafter for LastWalRecordXlogSwitchEndsOnPageBoundary {
client.execute("CREATE table t(x int)", &[])?; client.execute("CREATE table t(x int)", &[])?;
// Add padding so the XLOG_SWITCH record ends exactly on XLOG_BLCKSZ boundary. We // Add padding so the XLOG_SWITCH record ends exactly on XLOG_BLCKSZ boundary.
// will use carefully-sized logical messages to advance WAL insert location such // We will use logical message as the padding. We start with detecting how much WAL
// that there is just enough space on the page for the XLOG_SWITCH record. // it takes for one logical message, considering all alignments and headers.
loop { let base_wal_advance = {
// We start with measuring how much WAL it takes for one logical message,
// considering all alignments and headers.
let before_lsn = client.pg_current_wal_insert_lsn()?; let before_lsn = client.pg_current_wal_insert_lsn()?;
// Small non-empty message bigger than few bytes is more likely than an empty
// message to have the same format as the big padding message.
client.execute( client.execute(
"SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', 10))", "SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', 10))",
&[], &[],
)?; )?;
let after_lsn = client.pg_current_wal_insert_lsn()?; // The XLOG_SWITCH record has no data => its size is exactly XLOG_SIZE_OF_XLOG_RECORD.
(u64::from(client.pg_current_wal_insert_lsn()?) - u64::from(before_lsn)) as usize
// Did the record cross a page boundary? If it did, start over. Crossing a + XLOG_SIZE_OF_XLOG_RECORD
// page boundary adds to the apparent size of the record because of the page };
// header, which throws off the calculation. let mut remaining_lsn =
if u64::from(before_lsn) / XLOG_BLCKSZ as u64 XLOG_BLCKSZ - u64::from(client.pg_current_wal_insert_lsn()?) as usize % XLOG_BLCKSZ;
!= u64::from(after_lsn) / XLOG_BLCKSZ as u64 if remaining_lsn < base_wal_advance {
{ remaining_lsn += XLOG_BLCKSZ;
continue;
}
// base_size is the size of a logical message without the payload
let base_size = u64::from(after_lsn) - u64::from(before_lsn) - 10;
// Is there enough space on the page for another logical message and an
// XLOG_SWITCH? If not, start over.
let page_remain = XLOG_BLCKSZ as u64 - u64::from(after_lsn) % XLOG_BLCKSZ as u64;
if page_remain < base_size - XLOG_SIZE_OF_XLOG_RECORD as u64 {
continue;
}
// We will write another logical message, such that after the logical message
// record, there will be space for exactly one XLOG_SWITCH. How large should
// the logical message's payload be? An XLOG_SWITCH record has no data => its
// size is exactly XLOG_SIZE_OF_XLOG_RECORD.
let repeats = page_remain - base_size - XLOG_SIZE_OF_XLOG_RECORD as u64;
client.execute(
"SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', $1))",
&[&(repeats as i32)],
)?;
break;
} }
let repeats = 10 + remaining_lsn - base_wal_advance;
info!(
"current_wal_insert_lsn={}, remaining_lsn={}, base_wal_advance={}, repeats={}",
client.pg_current_wal_insert_lsn()?,
remaining_lsn,
base_wal_advance,
repeats
);
client.execute(
"SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', $1))",
&[&(repeats as i32)],
)?;
info!( info!(
"current_wal_insert_lsn={}, XLOG_SIZE_OF_XLOG_RECORD={}", "current_wal_insert_lsn={}, XLOG_SIZE_OF_XLOG_RECORD={}",
client.pg_current_wal_insert_lsn()?, client.pg_current_wal_insert_lsn()?,

View File

@@ -38,7 +38,6 @@ azure_storage_blobs.workspace = true
futures-util.workspace = true futures-util.workspace = true
http-types.workspace = true http-types.workspace = true
itertools.workspace = true itertools.workspace = true
sync_wrapper = { workspace = true, features = ["futures"] }
[dev-dependencies] [dev-dependencies]
camino-tempfile.workspace = true camino-tempfile.workspace = true

View File

@@ -3,7 +3,6 @@
use std::borrow::Cow; use std::borrow::Cow;
use std::collections::HashMap; use std::collections::HashMap;
use std::env; use std::env;
use std::io;
use std::num::NonZeroU32; use std::num::NonZeroU32;
use std::pin::Pin; use std::pin::Pin;
use std::str::FromStr; use std::str::FromStr;
@@ -21,7 +20,6 @@ use azure_storage_blobs::blob::CopyStatus;
use azure_storage_blobs::prelude::ClientBuilder; use azure_storage_blobs::prelude::ClientBuilder;
use azure_storage_blobs::{blob::operations::GetBlobBuilder, prelude::ContainerClient}; use azure_storage_blobs::{blob::operations::GetBlobBuilder, prelude::ContainerClient};
use bytes::Bytes; use bytes::Bytes;
use futures::future::Either;
use futures::stream::Stream; use futures::stream::Stream;
use futures_util::StreamExt; use futures_util::StreamExt;
use futures_util::TryStreamExt; use futures_util::TryStreamExt;
@@ -130,12 +128,12 @@ impl AzureBlobStorage {
let kind = RequestKind::Get; let kind = RequestKind::Get;
let _permit = self.permit(kind, cancel).await?; let _permit = self.permit(kind, cancel).await?;
let cancel_or_timeout = crate::support::cancel_or_timeout(self.timeout, cancel.clone());
let cancel_or_timeout_ = crate::support::cancel_or_timeout(self.timeout, cancel.clone());
let mut etag = None; let mut etag = None;
let mut last_modified = None; let mut last_modified = None;
let mut metadata = HashMap::new(); let mut metadata = HashMap::new();
// TODO give proper streaming response instead of buffering into RAM
// https://github.com/neondatabase/neon/issues/5563
let download = async { let download = async {
let response = builder let response = builder
@@ -154,46 +152,39 @@ impl AzureBlobStorage {
Err(_elapsed) => Err(DownloadError::Timeout), Err(_elapsed) => Err(DownloadError::Timeout),
}); });
let mut response = Box::pin(response); let mut response = std::pin::pin!(response);
let Some(part) = response.next().await else { let mut bufs = Vec::new();
while let Some(part) = response.next().await {
let part = part?;
if etag.is_none() {
etag = Some(part.blob.properties.etag);
}
if last_modified.is_none() {
last_modified = Some(part.blob.properties.last_modified.into());
}
if let Some(blob_meta) = part.blob.metadata {
metadata.extend(blob_meta.iter().map(|(k, v)| (k.to_owned(), v.to_owned())));
}
let data = part
.data
.collect()
.await
.map_err(|e| DownloadError::Other(e.into()))?;
bufs.push(data);
}
if bufs.is_empty() {
return Err(DownloadError::Other(anyhow::anyhow!( return Err(DownloadError::Other(anyhow::anyhow!(
"Azure GET response contained no response body" "Azure GET response contained no buffers"
))); )));
};
let part = part?;
if etag.is_none() {
etag = Some(part.blob.properties.etag);
} }
if last_modified.is_none() {
last_modified = Some(part.blob.properties.last_modified.into());
}
if let Some(blob_meta) = part.blob.metadata {
metadata.extend(blob_meta.iter().map(|(k, v)| (k.to_owned(), v.to_owned())));
}
// unwrap safety: if these were None, bufs would be empty and we would have returned an error already // unwrap safety: if these were None, bufs would be empty and we would have returned an error already
let etag = etag.unwrap(); let etag = etag.unwrap();
let last_modified = last_modified.unwrap(); let last_modified = last_modified.unwrap();
let tail_stream = response
.map(|part| match part {
Ok(part) => Either::Left(part.data.map(|r| r.map_err(io::Error::other))),
Err(e) => {
Either::Right(futures::stream::once(async { Err(io::Error::other(e)) }))
}
})
.flatten();
let stream = part
.data
.map(|r| r.map_err(io::Error::other))
.chain(sync_wrapper::SyncStream::new(tail_stream));
//.chain(SyncStream::from_pin(Box::pin(tail_stream)));
let download_stream = crate::support::DownloadStream::new(cancel_or_timeout_, stream);
Ok(Download { Ok(Download {
download_stream: Box::pin(download_stream), download_stream: Box::pin(futures::stream::iter(bufs.into_iter().map(Ok))),
etag, etag,
last_modified, last_modified,
metadata: Some(StorageMetadata(metadata)), metadata: Some(StorageMetadata(metadata)),
@@ -202,10 +193,7 @@ impl AzureBlobStorage {
tokio::select! { tokio::select! {
bufs = download => bufs, bufs = download => bufs,
cancel_or_timeout = cancel_or_timeout => match cancel_or_timeout { _ = cancel.cancelled() => Err(DownloadError::Cancelled),
TimeoutOrCancel::Timeout => Err(DownloadError::Timeout),
TimeoutOrCancel::Cancel => Err(DownloadError::Cancelled),
},
} }
} }

View File

@@ -21,13 +21,11 @@ use std::{
fmt::Debug, fmt::Debug,
num::{NonZeroU32, NonZeroUsize}, num::{NonZeroU32, NonZeroUsize},
pin::Pin, pin::Pin,
str::FromStr,
sync::Arc, sync::Arc,
time::{Duration, SystemTime}, time::{Duration, SystemTime},
}; };
use anyhow::{bail, Context}; use anyhow::{bail, Context};
use aws_sdk_s3::types::StorageClass;
use camino::{Utf8Path, Utf8PathBuf}; use camino::{Utf8Path, Utf8PathBuf};
use bytes::Bytes; use bytes::Bytes;
@@ -136,11 +134,6 @@ impl RemotePath {
pub fn strip_prefix(&self, p: &RemotePath) -> Result<&Utf8Path, std::path::StripPrefixError> { pub fn strip_prefix(&self, p: &RemotePath) -> Result<&Utf8Path, std::path::StripPrefixError> {
self.0.strip_prefix(&p.0) self.0.strip_prefix(&p.0)
} }
pub fn add_trailing_slash(&self) -> Self {
// Unwrap safety inputs are guararnteed to be valid UTF-8
Self(format!("{}/", self.0).try_into().unwrap())
}
} }
/// We don't need callers to be able to pass arbitrary delimiters: just control /// We don't need callers to be able to pass arbitrary delimiters: just control
@@ -164,21 +157,47 @@ pub struct Listing {
/// providing basic CRUD operations for storage files. /// providing basic CRUD operations for storage files.
#[allow(async_fn_in_trait)] #[allow(async_fn_in_trait)]
pub trait RemoteStorage: Send + Sync + 'static { pub trait RemoteStorage: Send + Sync + 'static {
/// List objects in remote storage, with semantics matching AWS S3's ListObjectsV2. /// Lists all top level subdirectories for a given prefix
/// (see `<https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html>`) /// Note: here we assume that if the prefix is passed it was obtained via remote_object_id
/// /// which already takes into account any kind of global prefix (prefix_in_bucket for S3 or storage_root for LocalFS)
/// Note that the prefix is relative to any `prefix_in_bucket` configured for the client, not /// so this method doesnt need to.
/// from the absolute root of the bucket. async fn list_prefixes(
/// &self,
/// `mode` configures whether to use a delimiter. Without a delimiter all keys prefix: Option<&RemotePath>,
/// within the prefix are listed in the `keys` of the result. With a delimiter, any "directories" at the top level of cancel: &CancellationToken,
/// the prefix are returned in the `prefixes` of the result, and keys in the top level of the prefix are ) -> Result<Vec<RemotePath>, DownloadError> {
/// returned in `keys` (). let result = self
/// .list(prefix, ListingMode::WithDelimiter, None, cancel)
/// `max_keys` controls the maximum number of keys that will be returned. If this is None, this function .await?
/// will iteratively call listobjects until it runs out of keys. Note that this is not safe to use on .prefixes;
/// unlimted size buckets, as the full list of objects is allocated into a monolithic data structure. Ok(result)
}
/// Lists all files in directory "recursively"
/// (not really recursively, because AWS has a flat namespace)
/// Note: This is subtely different than list_prefixes,
/// because it is for listing files instead of listing
/// names sharing common prefixes.
/// For example,
/// list_files("foo/bar") = ["foo/bar/cat123.txt",
/// "foo/bar/cat567.txt", "foo/bar/dog123.txt", "foo/bar/dog456.txt"]
/// whereas,
/// list_prefixes("foo/bar/") = ["cat", "dog"]
/// See `test_real_s3.rs` for more details.
/// ///
/// max_keys limits max number of keys returned; None means unlimited.
async fn list_files(
&self,
prefix: Option<&RemotePath>,
max_keys: Option<NonZeroU32>,
cancel: &CancellationToken,
) -> Result<Vec<RemotePath>, DownloadError> {
let result = self
.list(prefix, ListingMode::NoDelimiter, max_keys, cancel)
.await?
.keys;
Ok(result)
}
async fn list( async fn list(
&self, &self,
prefix: Option<&RemotePath>, prefix: Option<&RemotePath>,
@@ -317,6 +336,41 @@ impl<Other: RemoteStorage> GenericRemoteStorage<Arc<Other>> {
} }
} }
// A function for listing all the files in a "directory"
// Example:
// list_files("foo/bar") = ["foo/bar/a.txt", "foo/bar/b.txt"]
//
// max_keys limits max number of keys returned; None means unlimited.
pub async fn list_files(
&self,
folder: Option<&RemotePath>,
max_keys: Option<NonZeroU32>,
cancel: &CancellationToken,
) -> Result<Vec<RemotePath>, DownloadError> {
match self {
Self::LocalFs(s) => s.list_files(folder, max_keys, cancel).await,
Self::AwsS3(s) => s.list_files(folder, max_keys, cancel).await,
Self::AzureBlob(s) => s.list_files(folder, max_keys, cancel).await,
Self::Unreliable(s) => s.list_files(folder, max_keys, cancel).await,
}
}
// lists common *prefixes*, if any of files
// Example:
// list_prefixes("foo123","foo567","bar123","bar432") = ["foo", "bar"]
pub async fn list_prefixes(
&self,
prefix: Option<&RemotePath>,
cancel: &CancellationToken,
) -> Result<Vec<RemotePath>, DownloadError> {
match self {
Self::LocalFs(s) => s.list_prefixes(prefix, cancel).await,
Self::AwsS3(s) => s.list_prefixes(prefix, cancel).await,
Self::AzureBlob(s) => s.list_prefixes(prefix, cancel).await,
Self::Unreliable(s) => s.list_prefixes(prefix, cancel).await,
}
}
/// See [`RemoteStorage::upload`] /// See [`RemoteStorage::upload`]
pub async fn upload( pub async fn upload(
&self, &self,
@@ -511,16 +565,6 @@ impl GenericRemoteStorage {
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub struct StorageMetadata(HashMap<String, String>); pub struct StorageMetadata(HashMap<String, String>);
impl<const N: usize> From<[(&str, &str); N]> for StorageMetadata {
fn from(arr: [(&str, &str); N]) -> Self {
let map: HashMap<String, String> = arr
.iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
Self(map)
}
}
/// External backup storage configuration, enough for creating a client for that storage. /// External backup storage configuration, enough for creating a client for that storage.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub struct RemoteStorageConfig { pub struct RemoteStorageConfig {
@@ -565,7 +609,6 @@ pub struct S3Config {
/// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details. /// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details.
pub concurrency_limit: NonZeroUsize, pub concurrency_limit: NonZeroUsize,
pub max_keys_per_list_response: Option<i32>, pub max_keys_per_list_response: Option<i32>,
pub upload_storage_class: Option<StorageClass>,
} }
impl Debug for S3Config { impl Debug for S3Config {
@@ -694,18 +737,6 @@ impl RemoteStorageConfig {
endpoint, endpoint,
concurrency_limit, concurrency_limit,
max_keys_per_list_response, max_keys_per_list_response,
upload_storage_class: toml
.get("upload_storage_class")
.map(|prefix_in_bucket| -> anyhow::Result<_> {
let s = parse_toml_string("upload_storage_class", prefix_in_bucket)?;
let storage_class = StorageClass::from_str(&s).expect("infallible");
#[allow(deprecated)]
if matches!(storage_class, StorageClass::Unknown(_)) {
bail!("Specified storage class unknown to SDK: '{s}'. Allowed values: {:?}", StorageClass::values());
}
Ok(storage_class)
})
.transpose()?,
}) })
} }
(_, _, _, Some(_), None) => { (_, _, _, Some(_), None) => {

View File

@@ -5,9 +5,11 @@
//! volume is mounted to the local FS. //! volume is mounted to the local FS.
use std::{ use std::{
collections::HashSet, borrow::Cow,
future::Future,
io::ErrorKind, io::ErrorKind,
num::NonZeroU32, num::NonZeroU32,
pin::Pin,
time::{Duration, SystemTime, UNIX_EPOCH}, time::{Duration, SystemTime, UNIX_EPOCH},
}; };
@@ -20,11 +22,11 @@ use tokio::{
io::{self, AsyncReadExt, AsyncSeekExt, AsyncWriteExt}, io::{self, AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
}; };
use tokio_util::{io::ReaderStream, sync::CancellationToken}; use tokio_util::{io::ReaderStream, sync::CancellationToken};
use utils::crashsafe::path_with_suffix_extension; use tracing::*;
use utils::{crashsafe::path_with_suffix_extension, fs_ext::is_directory_empty};
use crate::{ use crate::{
Download, DownloadError, Listing, ListingMode, RemotePath, TimeTravelError, TimeoutOrCancel, Download, DownloadError, Listing, ListingMode, RemotePath, TimeTravelError, TimeoutOrCancel,
REMOTE_STORAGE_PREFIX_SEPARATOR,
}; };
use super::{RemoteStorage, StorageMetadata}; use super::{RemoteStorage, StorageMetadata};
@@ -91,47 +93,7 @@ impl LocalFs {
#[cfg(test)] #[cfg(test)]
async fn list_all(&self) -> anyhow::Result<Vec<RemotePath>> { async fn list_all(&self) -> anyhow::Result<Vec<RemotePath>> {
use std::{future::Future, pin::Pin}; Ok(get_all_files(&self.storage_root, true)
fn get_all_files<'a, P>(
directory_path: P,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<Utf8PathBuf>>> + Send + Sync + 'a>>
where
P: AsRef<Utf8Path> + Send + Sync + 'a,
{
Box::pin(async move {
let directory_path = directory_path.as_ref();
if directory_path.exists() {
if directory_path.is_dir() {
let mut paths = Vec::new();
let mut dir_contents = fs::read_dir(directory_path).await?;
while let Some(dir_entry) = dir_contents.next_entry().await? {
let file_type = dir_entry.file_type().await?;
let entry_path =
Utf8PathBuf::from_path_buf(dir_entry.path()).map_err(|pb| {
anyhow::Error::msg(format!(
"non-Unicode path: {}",
pb.to_string_lossy()
))
})?;
if file_type.is_symlink() {
tracing::debug!("{entry_path:?} is a symlink, skipping")
} else if file_type.is_dir() {
paths.extend(get_all_files(&entry_path).await?.into_iter())
} else {
paths.push(entry_path);
}
}
Ok(paths)
} else {
bail!("Path {directory_path:?} is not a directory")
}
} else {
Ok(Vec::new())
}
})
}
Ok(get_all_files(&self.storage_root)
.await? .await?
.into_iter() .into_iter()
.map(|path| { .map(|path| {
@@ -158,14 +120,6 @@ impl LocalFs {
// S3 object list prefixes can be arbitrary strings, but when reading // S3 object list prefixes can be arbitrary strings, but when reading
// the local filesystem we need a directory to start calling read_dir on. // the local filesystem we need a directory to start calling read_dir on.
let mut initial_dir = full_path.clone(); let mut initial_dir = full_path.clone();
// If there's no trailing slash, we have to start looking from one above: even if
// `initial_dir` is a directory, we should still list any prefixes in the parent
// that start with the same string.
if !full_path.to_string().ends_with('/') {
initial_dir.pop();
}
loop { loop {
// Did we make it to the root? // Did we make it to the root?
if initial_dir.parent().is_none() { if initial_dir.parent().is_none() {
@@ -341,66 +295,61 @@ impl RemoteStorage for LocalFs {
let op = async { let op = async {
let mut result = Listing::default(); let mut result = Listing::default();
// Filter out directories: in S3 directories don't exist, only the keys within them do. if let ListingMode::NoDelimiter = mode {
let keys = self let keys = self
.list_recursive(prefix) .list_recursive(prefix)
.await
.map_err(DownloadError::Other)?;
result.keys = keys
.into_iter()
.filter(|k| {
let path = k.with_base(&self.storage_root);
!path.is_dir()
})
.collect();
if let Some(max_keys) = max_keys {
result.keys.truncate(max_keys.get() as usize);
}
return Ok(result);
}
let path = match prefix {
Some(prefix) => Cow::Owned(prefix.with_base(&self.storage_root)),
None => Cow::Borrowed(&self.storage_root),
};
let prefixes_to_filter = get_all_files(path.as_ref(), false)
.await .await
.map_err(DownloadError::Other)?; .map_err(DownloadError::Other)?;
let keys = keys
.into_iter()
.filter(|k| {
let path = k.with_base(&self.storage_root);
!path.is_dir()
})
.collect();
if let ListingMode::NoDelimiter = mode { // filter out empty directories to mirror s3 behavior.
result.keys = keys; for prefix in prefixes_to_filter {
} else { if prefix.is_dir()
let mut prefixes = HashSet::new(); && is_directory_empty(&prefix)
for key in keys { .await
// If the part after the prefix includes a "/", take only the first part and put it in `prefixes`. .map_err(DownloadError::Other)?
let relative_key = if let Some(prefix) = prefix { {
let mut prefix = prefix.clone(); continue;
// We only strip the dirname of the prefix, so that when we strip it from the start of keys we }
// end up with full file/dir names.
let prefix_full_local_path = prefix.with_base(&self.storage_root); let stripped = prefix
let has_slash = prefix.0.to_string().ends_with('/'); .strip_prefix(&self.storage_root)
let strip_prefix = if prefix_full_local_path.is_dir() && has_slash { .context("Failed to strip prefix")
prefix .and_then(RemotePath::new)
} else { .expect(
prefix.0.pop(); "We list files for storage root, hence should be able to remote the prefix",
prefix );
};
if prefix.is_dir() {
RemotePath::new(key.strip_prefix(&strip_prefix).unwrap()).unwrap() result.prefixes.push(stripped);
} else { } else {
key result.keys.push(stripped);
};
let relative_key = format!("{}", relative_key);
if relative_key.contains(REMOTE_STORAGE_PREFIX_SEPARATOR) {
let first_part = relative_key
.split(REMOTE_STORAGE_PREFIX_SEPARATOR)
.next()
.unwrap()
.to_owned();
prefixes.insert(first_part);
} else {
result
.keys
.push(RemotePath::from_string(&relative_key).unwrap());
}
} }
result.prefixes = prefixes
.into_iter()
.map(|s| RemotePath::from_string(&s).unwrap())
.collect();
} }
if let Some(max_keys) = max_keys {
result.keys.truncate(max_keys.get() as usize);
}
Ok(result) Ok(result)
}; };
@@ -611,6 +560,50 @@ fn storage_metadata_path(original_path: &Utf8Path) -> Utf8PathBuf {
path_with_suffix_extension(original_path, "metadata") path_with_suffix_extension(original_path, "metadata")
} }
fn get_all_files<'a, P>(
directory_path: P,
recursive: bool,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<Utf8PathBuf>>> + Send + Sync + 'a>>
where
P: AsRef<Utf8Path> + Send + Sync + 'a,
{
Box::pin(async move {
let directory_path = directory_path.as_ref();
if directory_path.exists() {
if directory_path.is_dir() {
let mut paths = Vec::new();
let mut dir_contents = fs::read_dir(directory_path).await?;
while let Some(dir_entry) = dir_contents.next_entry().await? {
let file_type = dir_entry.file_type().await?;
let entry_path =
Utf8PathBuf::from_path_buf(dir_entry.path()).map_err(|pb| {
anyhow::Error::msg(format!(
"non-Unicode path: {}",
pb.to_string_lossy()
))
})?;
if file_type.is_symlink() {
debug!("{entry_path:?} is a symlink, skipping")
} else if file_type.is_dir() {
if recursive {
paths.extend(get_all_files(&entry_path, true).await?.into_iter())
} else {
paths.push(entry_path)
}
} else {
paths.push(entry_path);
}
}
Ok(paths)
} else {
bail!("Path {directory_path:?} is not a directory")
}
} else {
Ok(Vec::new())
}
})
}
async fn create_target_directory(target_file_path: &Utf8Path) -> anyhow::Result<()> { async fn create_target_directory(target_file_path: &Utf8Path) -> anyhow::Result<()> {
let target_dir = match target_file_path.parent() { let target_dir = match target_file_path.parent() {
Some(parent_dir) => parent_dir, Some(parent_dir) => parent_dir,
@@ -930,18 +923,13 @@ mod fs_tests {
// No delimiter: should recursively list everything // No delimiter: should recursively list everything
let (storage, cancel) = create_storage()?; let (storage, cancel) = create_storage()?;
let child = upload_dummy_file(&storage, "grandparent/parent/child", None, &cancel).await?; let child = upload_dummy_file(&storage, "grandparent/parent/child", None, &cancel).await?;
let child_sibling =
upload_dummy_file(&storage, "grandparent/parent/child_sibling", None, &cancel).await?;
let uncle = upload_dummy_file(&storage, "grandparent/uncle", None, &cancel).await?; let uncle = upload_dummy_file(&storage, "grandparent/uncle", None, &cancel).await?;
let listing = storage let listing = storage
.list(None, ListingMode::NoDelimiter, None, &cancel) .list(None, ListingMode::NoDelimiter, None, &cancel)
.await?; .await?;
assert!(listing.prefixes.is_empty()); assert!(listing.prefixes.is_empty());
assert_eq!( assert_eq!(listing.keys, [uncle.clone(), child.clone()].to_vec());
listing.keys.into_iter().collect::<HashSet<_>>(),
HashSet::from([uncle.clone(), child.clone(), child_sibling.clone()])
);
// Delimiter: should only go one deep // Delimiter: should only go one deep
let listing = storage let listing = storage
@@ -954,25 +942,7 @@ mod fs_tests {
); );
assert!(listing.keys.is_empty()); assert!(listing.keys.is_empty());
// Delimiter & prefix with a trailing slash // Delimiter & prefix
let listing = storage
.list(
Some(&RemotePath::from_string("timelines/some_timeline/grandparent/").unwrap()),
ListingMode::WithDelimiter,
None,
&cancel,
)
.await?;
assert_eq!(
listing.keys,
[RemotePath::from_string("uncle").unwrap()].to_vec()
);
assert_eq!(
listing.prefixes,
[RemotePath::from_string("parent").unwrap()].to_vec()
);
// Delimiter and prefix without a trailing slash
let listing = storage let listing = storage
.list( .list(
Some(&RemotePath::from_string("timelines/some_timeline/grandparent").unwrap()), Some(&RemotePath::from_string("timelines/some_timeline/grandparent").unwrap()),
@@ -981,66 +951,12 @@ mod fs_tests {
&cancel, &cancel,
) )
.await?; .await?;
assert_eq!(listing.keys, [].to_vec());
assert_eq!( assert_eq!(
listing.prefixes, listing.prefixes,
[RemotePath::from_string("grandparent").unwrap()].to_vec() [RemotePath::from_string("timelines/some_timeline/grandparent/parent").unwrap()]
); .to_vec()
// Delimiter and prefix that's partway through a path component
let listing = storage
.list(
Some(&RemotePath::from_string("timelines/some_timeline/grandp").unwrap()),
ListingMode::WithDelimiter,
None,
&cancel,
)
.await?;
assert_eq!(listing.keys, [].to_vec());
assert_eq!(
listing.prefixes,
[RemotePath::from_string("grandparent").unwrap()].to_vec()
);
Ok(())
}
#[tokio::test]
async fn list_part_component() -> anyhow::Result<()> {
// No delimiter: should recursively list everything
let (storage, cancel) = create_storage()?;
// Imitates what happens in a tenant path when we have an unsharded path and a sharded path, and do a listing
// of the unsharded path: although there is a "directory" at the unsharded path, it should be handled as
// a freeform prefix.
let _child_a =
upload_dummy_file(&storage, "grandparent/tenant-01/child", None, &cancel).await?;
let _child_b =
upload_dummy_file(&storage, "grandparent/tenant/child", None, &cancel).await?;
// Delimiter and prefix that's partway through a path component
let listing = storage
.list(
Some(
&RemotePath::from_string("timelines/some_timeline/grandparent/tenant").unwrap(),
),
ListingMode::WithDelimiter,
None,
&cancel,
)
.await?;
assert_eq!(listing.keys, [].to_vec());
let mut found_prefixes = listing.prefixes.clone();
found_prefixes.sort();
assert_eq!(
found_prefixes,
[
RemotePath::from_string("tenant").unwrap(),
RemotePath::from_string("tenant-01").unwrap(),
]
.to_vec()
); );
assert_eq!(listing.keys, [uncle.clone()].to_vec());
Ok(()) Ok(())
} }

View File

@@ -30,7 +30,7 @@ use aws_sdk_s3::{
config::{AsyncSleep, Builder, IdentityCache, Region, SharedAsyncSleep}, config::{AsyncSleep, Builder, IdentityCache, Region, SharedAsyncSleep},
error::SdkError, error::SdkError,
operation::get_object::GetObjectError, operation::get_object::GetObjectError,
types::{Delete, DeleteMarkerEntry, ObjectIdentifier, ObjectVersion, StorageClass}, types::{Delete, DeleteMarkerEntry, ObjectIdentifier, ObjectVersion},
Client, Client,
}; };
use aws_smithy_async::rt::sleep::TokioSleep; use aws_smithy_async::rt::sleep::TokioSleep;
@@ -62,7 +62,6 @@ pub struct S3Bucket {
bucket_name: String, bucket_name: String,
prefix_in_bucket: Option<String>, prefix_in_bucket: Option<String>,
max_keys_per_list_response: Option<i32>, max_keys_per_list_response: Option<i32>,
upload_storage_class: Option<StorageClass>,
concurrency_limiter: ConcurrencyLimiter, concurrency_limiter: ConcurrencyLimiter,
// Per-request timeout. Accessible for tests. // Per-request timeout. Accessible for tests.
pub timeout: Duration, pub timeout: Duration,
@@ -155,7 +154,6 @@ impl S3Bucket {
max_keys_per_list_response: aws_config.max_keys_per_list_response, max_keys_per_list_response: aws_config.max_keys_per_list_response,
prefix_in_bucket, prefix_in_bucket,
concurrency_limiter: ConcurrencyLimiter::new(aws_config.concurrency_limit.get()), concurrency_limiter: ConcurrencyLimiter::new(aws_config.concurrency_limit.get()),
upload_storage_class: aws_config.upload_storage_class.clone(),
timeout, timeout,
}) })
} }
@@ -180,7 +178,10 @@ impl S3Bucket {
pub fn relative_path_to_s3_object(&self, path: &RemotePath) -> String { pub fn relative_path_to_s3_object(&self, path: &RemotePath) -> String {
assert_eq!(std::path::MAIN_SEPARATOR, REMOTE_STORAGE_PREFIX_SEPARATOR); assert_eq!(std::path::MAIN_SEPARATOR, REMOTE_STORAGE_PREFIX_SEPARATOR);
let path_string = path.get_path().as_str(); let path_string = path
.get_path()
.as_str()
.trim_end_matches(REMOTE_STORAGE_PREFIX_SEPARATOR);
match &self.prefix_in_bucket { match &self.prefix_in_bucket {
Some(prefix) => prefix.clone() + "/" + path_string, Some(prefix) => prefix.clone() + "/" + path_string,
None => path_string.to_string(), None => path_string.to_string(),
@@ -470,11 +471,16 @@ impl RemoteStorage for S3Bucket {
// get the passed prefix or if it is not set use prefix_in_bucket value // get the passed prefix or if it is not set use prefix_in_bucket value
let list_prefix = prefix let list_prefix = prefix
.map(|p| self.relative_path_to_s3_object(p)) .map(|p| self.relative_path_to_s3_object(p))
.or_else(|| { .or_else(|| self.prefix_in_bucket.clone())
self.prefix_in_bucket.clone().map(|mut s| { .map(|mut p| {
s.push(REMOTE_STORAGE_PREFIX_SEPARATOR); // required to end with a separator
s // otherwise request will return only the entry of a prefix
}) if matches!(mode, ListingMode::WithDelimiter)
&& !p.ends_with(REMOTE_STORAGE_PREFIX_SEPARATOR)
{
p.push(REMOTE_STORAGE_PREFIX_SEPARATOR);
}
p
}); });
let _permit = self.permit(kind, cancel).await?; let _permit = self.permit(kind, cancel).await?;
@@ -543,15 +549,11 @@ impl RemoteStorage for S3Bucket {
} }
} }
// S3 gives us prefixes like "foo/", we return them like "foo" result.prefixes.extend(
result.prefixes.extend(prefixes.iter().filter_map(|o| { prefixes
Some( .iter()
self.s3_object_to_relative_path( .filter_map(|o| Some(self.s3_object_to_relative_path(o.prefix()?))),
o.prefix()? );
.trim_end_matches(REMOTE_STORAGE_PREFIX_SEPARATOR),
),
)
}));
continuation_token = match response.next_continuation_token { continuation_token = match response.next_continuation_token {
Some(new_token) => Some(new_token), Some(new_token) => Some(new_token),
@@ -584,7 +586,6 @@ impl RemoteStorage for S3Bucket {
.bucket(self.bucket_name.clone()) .bucket(self.bucket_name.clone())
.key(self.relative_path_to_s3_object(to)) .key(self.relative_path_to_s3_object(to))
.set_metadata(metadata.map(|m| m.0)) .set_metadata(metadata.map(|m| m.0))
.set_storage_class(self.upload_storage_class.clone())
.content_length(from_size_bytes.try_into()?) .content_length(from_size_bytes.try_into()?)
.body(bytes_stream) .body(bytes_stream)
.send(); .send();
@@ -636,7 +637,6 @@ impl RemoteStorage for S3Bucket {
.copy_object() .copy_object()
.bucket(self.bucket_name.clone()) .bucket(self.bucket_name.clone())
.key(self.relative_path_to_s3_object(to)) .key(self.relative_path_to_s3_object(to))
.set_storage_class(self.upload_storage_class.clone())
.copy_source(copy_source) .copy_source(copy_source)
.send(); .send();
@@ -894,7 +894,6 @@ impl RemoteStorage for S3Bucket {
.copy_object() .copy_object()
.bucket(self.bucket_name.clone()) .bucket(self.bucket_name.clone())
.key(key) .key(key)
.set_storage_class(self.upload_storage_class.clone())
.copy_source(&source_id) .copy_source(&source_id)
.send(); .send();
@@ -1051,22 +1050,22 @@ mod tests {
Some("/test/prefix/"), Some("/test/prefix/"),
]; ];
let expected_outputs = [ let expected_outputs = [
vec!["", "some/path", "some/path/"], vec!["", "some/path", "some/path"],
vec!["/", "/some/path", "/some/path/"], vec!["/", "/some/path", "/some/path"],
vec![ vec![
"test/prefix/", "test/prefix/",
"test/prefix/some/path", "test/prefix/some/path",
"test/prefix/some/path/", "test/prefix/some/path",
], ],
vec![ vec![
"test/prefix/", "test/prefix/",
"test/prefix/some/path", "test/prefix/some/path",
"test/prefix/some/path/", "test/prefix/some/path",
], ],
vec![ vec![
"test/prefix/", "test/prefix/",
"test/prefix/some/path", "test/prefix/some/path",
"test/prefix/some/path/", "test/prefix/some/path",
], ],
]; ];
@@ -1078,7 +1077,6 @@ mod tests {
endpoint: None, endpoint: None,
concurrency_limit: NonZeroUsize::new(100).unwrap(), concurrency_limit: NonZeroUsize::new(100).unwrap(),
max_keys_per_list_response: Some(5), max_keys_per_list_response: Some(5),
upload_storage_class: None,
}; };
let storage = let storage =
S3Bucket::new(&config, std::time::Duration::ZERO).expect("remote storage init"); S3Bucket::new(&config, std::time::Duration::ZERO).expect("remote storage init");

View File

@@ -107,6 +107,27 @@ impl UnreliableWrapper {
type VoidStorage = crate::LocalFs; type VoidStorage = crate::LocalFs;
impl RemoteStorage for UnreliableWrapper { impl RemoteStorage for UnreliableWrapper {
async fn list_prefixes(
&self,
prefix: Option<&RemotePath>,
cancel: &CancellationToken,
) -> Result<Vec<RemotePath>, DownloadError> {
self.attempt(RemoteOp::ListPrefixes(prefix.cloned()))
.map_err(DownloadError::Other)?;
self.inner.list_prefixes(prefix, cancel).await
}
async fn list_files(
&self,
folder: Option<&RemotePath>,
max_keys: Option<NonZeroU32>,
cancel: &CancellationToken,
) -> Result<Vec<RemotePath>, DownloadError> {
self.attempt(RemoteOp::ListPrefixes(folder.cloned()))
.map_err(DownloadError::Other)?;
self.inner.list_files(folder, max_keys, cancel).await
}
async fn list( async fn list(
&self, &self,
prefix: Option<&RemotePath>, prefix: Option<&RemotePath>,

View File

@@ -1,6 +1,5 @@
use anyhow::Context; use anyhow::Context;
use camino::Utf8Path; use camino::Utf8Path;
use remote_storage::ListingMode;
use remote_storage::RemotePath; use remote_storage::RemotePath;
use std::sync::Arc; use std::sync::Arc;
use std::{collections::HashSet, num::NonZeroU32}; use std::{collections::HashSet, num::NonZeroU32};
@@ -55,9 +54,9 @@ async fn pagination_should_work(ctx: &mut MaybeEnabledStorageWithTestBlobs) -> a
let base_prefix = RemotePath::new(Utf8Path::new(ctx.enabled.base_prefix)) let base_prefix = RemotePath::new(Utf8Path::new(ctx.enabled.base_prefix))
.context("common_prefix construction")?; .context("common_prefix construction")?;
let root_remote_prefixes = test_client let root_remote_prefixes = test_client
.list(None, ListingMode::WithDelimiter, None, &cancel) .list_prefixes(None, &cancel)
.await? .await
.prefixes .context("client list root prefixes failure")?
.into_iter() .into_iter()
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
assert_eq!( assert_eq!(
@@ -66,14 +65,9 @@ async fn pagination_should_work(ctx: &mut MaybeEnabledStorageWithTestBlobs) -> a
); );
let nested_remote_prefixes = test_client let nested_remote_prefixes = test_client
.list( .list_prefixes(Some(&base_prefix), &cancel)
Some(&base_prefix.add_trailing_slash()), .await
ListingMode::WithDelimiter, .context("client list nested prefixes failure")?
None,
&cancel,
)
.await?
.prefixes
.into_iter() .into_iter()
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
let remote_only_prefixes = nested_remote_prefixes let remote_only_prefixes = nested_remote_prefixes
@@ -96,13 +90,11 @@ async fn pagination_should_work(ctx: &mut MaybeEnabledStorageWithTestBlobs) -> a
/// ///
/// First, create a set of S3 objects with keys `random_prefix/folder{j}/blob_{i}.txt` in [`upload_remote_data`] /// First, create a set of S3 objects with keys `random_prefix/folder{j}/blob_{i}.txt` in [`upload_remote_data`]
/// Then performs the following queries: /// Then performs the following queries:
/// 1. `list(None)`. This should return all files `random_prefix/folder{j}/blob_{i}.txt` /// 1. `list_files(None)`. This should return all files `random_prefix/folder{j}/blob_{i}.txt`
/// 2. `list("folder1")`. This should return all files `random_prefix/folder1/blob_{i}.txt` /// 2. `list_files("folder1")`. This should return all files `random_prefix/folder1/blob_{i}.txt`
#[test_context(MaybeEnabledStorageWithSimpleTestBlobs)] #[test_context(MaybeEnabledStorageWithSimpleTestBlobs)]
#[tokio::test] #[tokio::test]
async fn list_no_delimiter_works( async fn list_files_works(ctx: &mut MaybeEnabledStorageWithSimpleTestBlobs) -> anyhow::Result<()> {
ctx: &mut MaybeEnabledStorageWithSimpleTestBlobs,
) -> anyhow::Result<()> {
let ctx = match ctx { let ctx = match ctx {
MaybeEnabledStorageWithSimpleTestBlobs::Enabled(ctx) => ctx, MaybeEnabledStorageWithSimpleTestBlobs::Enabled(ctx) => ctx,
MaybeEnabledStorageWithSimpleTestBlobs::Disabled => return Ok(()), MaybeEnabledStorageWithSimpleTestBlobs::Disabled => return Ok(()),
@@ -115,36 +107,29 @@ async fn list_no_delimiter_works(
let base_prefix = let base_prefix =
RemotePath::new(Utf8Path::new("folder1")).context("common_prefix construction")?; RemotePath::new(Utf8Path::new("folder1")).context("common_prefix construction")?;
let root_files = test_client let root_files = test_client
.list(None, ListingMode::NoDelimiter, None, &cancel) .list_files(None, None, &cancel)
.await .await
.context("client list root files failure")? .context("client list root files failure")?
.keys
.into_iter() .into_iter()
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
assert_eq!( assert_eq!(
root_files, root_files,
ctx.remote_blobs.clone(), ctx.remote_blobs.clone(),
"remote storage list on root mismatches with the uploads." "remote storage list_files on root mismatches with the uploads."
); );
// Test that max_keys limit works. In total there are about 21 files (see // Test that max_keys limit works. In total there are about 21 files (see
// upload_simple_remote_data call in test_real_s3.rs). // upload_simple_remote_data call in test_real_s3.rs).
let limited_root_files = test_client let limited_root_files = test_client
.list( .list_files(None, Some(NonZeroU32::new(2).unwrap()), &cancel)
None,
ListingMode::NoDelimiter,
Some(NonZeroU32::new(2).unwrap()),
&cancel,
)
.await .await
.context("client list root files failure")?; .context("client list root files failure")?;
assert_eq!(limited_root_files.keys.len(), 2); assert_eq!(limited_root_files.len(), 2);
let nested_remote_files = test_client let nested_remote_files = test_client
.list(Some(&base_prefix), ListingMode::NoDelimiter, None, &cancel) .list_files(Some(&base_prefix), None, &cancel)
.await .await
.context("client list nested files failure")? .context("client list nested files failure")?
.keys
.into_iter() .into_iter()
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
let trim_remote_blobs: HashSet<_> = ctx let trim_remote_blobs: HashSet<_> = ctx
@@ -156,7 +141,7 @@ async fn list_no_delimiter_works(
.collect(); .collect();
assert_eq!( assert_eq!(
nested_remote_files, trim_remote_blobs, nested_remote_files, trim_remote_blobs,
"remote storage list on subdirrectory mismatches with the uploads." "remote storage list_files on subdirrectory mismatches with the uploads."
); );
Ok(()) Ok(())
} }
@@ -214,11 +199,7 @@ async fn delete_objects_works(ctx: &mut MaybeEnabledStorage) -> anyhow::Result<(
ctx.client.delete_objects(&[path1, path2], &cancel).await?; ctx.client.delete_objects(&[path1, path2], &cancel).await?;
let prefixes = ctx let prefixes = ctx.client.list_prefixes(None, &cancel).await?;
.client
.list(None, ListingMode::WithDelimiter, None, &cancel)
.await?
.prefixes;
assert_eq!(prefixes.len(), 1); assert_eq!(prefixes.len(), 1);

View File

@@ -57,6 +57,7 @@ enum MaybeEnabledStorage {
Disabled, Disabled,
} }
#[async_trait::async_trait]
impl AsyncTestContext for MaybeEnabledStorage { impl AsyncTestContext for MaybeEnabledStorage {
async fn setup() -> Self { async fn setup() -> Self {
ensure_logging_ready(); ensure_logging_ready();
@@ -85,6 +86,7 @@ struct AzureWithTestBlobs {
remote_blobs: HashSet<RemotePath>, remote_blobs: HashSet<RemotePath>,
} }
#[async_trait::async_trait]
impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs { impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
async fn setup() -> Self { async fn setup() -> Self {
ensure_logging_ready(); ensure_logging_ready();
@@ -132,6 +134,10 @@ impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
} }
} }
// NOTE: the setups for the list_prefixes test and the list_files test are very similar
// However, they are not idential. The list_prefixes function is concerned with listing prefixes,
// whereas the list_files function is concerned with listing files.
// See `RemoteStorage::list_files` documentation for more details
enum MaybeEnabledStorageWithSimpleTestBlobs { enum MaybeEnabledStorageWithSimpleTestBlobs {
Enabled(AzureWithSimpleTestBlobs), Enabled(AzureWithSimpleTestBlobs),
Disabled, Disabled,
@@ -142,6 +148,7 @@ struct AzureWithSimpleTestBlobs {
remote_blobs: HashSet<RemotePath>, remote_blobs: HashSet<RemotePath>,
} }
#[async_trait::async_trait]
impl AsyncTestContext for MaybeEnabledStorageWithSimpleTestBlobs { impl AsyncTestContext for MaybeEnabledStorageWithSimpleTestBlobs {
async fn setup() -> Self { async fn setup() -> Self {
ensure_logging_ready(); ensure_logging_ready();

View File

@@ -12,8 +12,8 @@ use anyhow::Context;
use camino::Utf8Path; use camino::Utf8Path;
use futures_util::StreamExt; use futures_util::StreamExt;
use remote_storage::{ use remote_storage::{
DownloadError, GenericRemoteStorage, ListingMode, RemotePath, RemoteStorageConfig, DownloadError, GenericRemoteStorage, RemotePath, RemoteStorageConfig, RemoteStorageKind,
RemoteStorageKind, S3Config, S3Config,
}; };
use test_context::test_context; use test_context::test_context;
use test_context::AsyncTestContext; use test_context::AsyncTestContext;
@@ -75,14 +75,11 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow:
client: &Arc<GenericRemoteStorage>, client: &Arc<GenericRemoteStorage>,
cancel: &CancellationToken, cancel: &CancellationToken,
) -> anyhow::Result<HashSet<RemotePath>> { ) -> anyhow::Result<HashSet<RemotePath>> {
Ok( Ok(retry(|| client.list_files(None, None, cancel))
retry(|| client.list(None, ListingMode::NoDelimiter, None, cancel)) .await
.await .context("list root files failure")?
.context("list root files failure")? .into_iter()
.keys .collect::<HashSet<_>>())
.into_iter()
.collect::<HashSet<_>>(),
)
} }
let cancel = CancellationToken::new(); let cancel = CancellationToken::new();
@@ -222,6 +219,7 @@ enum MaybeEnabledStorage {
Disabled, Disabled,
} }
#[async_trait::async_trait]
impl AsyncTestContext for MaybeEnabledStorage { impl AsyncTestContext for MaybeEnabledStorage {
async fn setup() -> Self { async fn setup() -> Self {
ensure_logging_ready(); ensure_logging_ready();
@@ -250,6 +248,7 @@ struct S3WithTestBlobs {
remote_blobs: HashSet<RemotePath>, remote_blobs: HashSet<RemotePath>,
} }
#[async_trait::async_trait]
impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs { impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
async fn setup() -> Self { async fn setup() -> Self {
ensure_logging_ready(); ensure_logging_ready();
@@ -297,6 +296,10 @@ impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
} }
} }
// NOTE: the setups for the list_prefixes test and the list_files test are very similar
// However, they are not idential. The list_prefixes function is concerned with listing prefixes,
// whereas the list_files function is concerned with listing files.
// See `RemoteStorage::list_files` documentation for more details
enum MaybeEnabledStorageWithSimpleTestBlobs { enum MaybeEnabledStorageWithSimpleTestBlobs {
Enabled(S3WithSimpleTestBlobs), Enabled(S3WithSimpleTestBlobs),
Disabled, Disabled,
@@ -307,6 +310,7 @@ struct S3WithSimpleTestBlobs {
remote_blobs: HashSet<RemotePath>, remote_blobs: HashSet<RemotePath>,
} }
#[async_trait::async_trait]
impl AsyncTestContext for MaybeEnabledStorageWithSimpleTestBlobs { impl AsyncTestContext for MaybeEnabledStorageWithSimpleTestBlobs {
async fn setup() -> Self { async fn setup() -> Self {
ensure_logging_ready(); ensure_logging_ready();
@@ -380,7 +384,6 @@ fn create_s3_client(
endpoint: None, endpoint: None,
concurrency_limit: NonZeroUsize::new(100).unwrap(), concurrency_limit: NonZeroUsize::new(100).unwrap(),
max_keys_per_list_response, max_keys_per_list_response,
upload_storage_class: None,
}), }),
timeout: RemoteStorageConfig::DEFAULT_TIMEOUT, timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
}; };

View File

@@ -22,7 +22,6 @@ camino.workspace = true
chrono.workspace = true chrono.workspace = true
heapless.workspace = true heapless.workspace = true
hex = { workspace = true, features = ["serde"] } hex = { workspace = true, features = ["serde"] }
humantime.workspace = true
hyper = { workspace = true, features = ["full"] } hyper = { workspace = true, features = ["full"] }
fail.workspace = true fail.workspace = true
futures = { workspace = true} futures = { workspace = true}

View File

@@ -1,21 +0,0 @@
//! Wrapper around `std::env::var` for parsing environment variables.
use std::{fmt::Display, str::FromStr};
pub fn var<V, E>(varname: &str) -> Option<V>
where
V: FromStr<Err = E>,
E: Display,
{
match std::env::var(varname) {
Ok(s) => Some(
s.parse()
.map_err(|e| format!("failed to parse env var {varname}: {e:#}"))
.unwrap(),
),
Err(std::env::VarError::NotPresent) => None,
Err(std::env::VarError::NotUnicode(_)) => {
panic!("env var {varname} is not unicode")
}
}
}

View File

@@ -34,8 +34,6 @@ pub enum Generation {
/// scenarios where pageservers might otherwise issue conflicting writes to /// scenarios where pageservers might otherwise issue conflicting writes to
/// remote storage /// remote storage
impl Generation { impl Generation {
pub const MAX: Self = Self::Valid(u32::MAX);
/// Create a new Generation that represents a legacy key format with /// Create a new Generation that represents a legacy key format with
/// no generation suffix /// no generation suffix
pub fn none() -> Self { pub fn none() -> Self {

View File

@@ -63,7 +63,6 @@ pub mod measured_stream;
pub mod serde_percent; pub mod serde_percent;
pub mod serde_regex; pub mod serde_regex;
pub mod serde_system_time;
pub mod pageserver_feedback; pub mod pageserver_feedback;
@@ -90,10 +89,6 @@ pub mod yielding_loop;
pub mod zstd; pub mod zstd;
pub mod env;
pub mod poison;
/// This is a shortcut to embed git sha into binaries and avoid copying the same build script to all packages /// This is a shortcut to embed git sha into binaries and avoid copying the same build script to all packages
/// ///
/// we have several cases: /// we have several cases:

View File

@@ -1,121 +0,0 @@
//! Protect a piece of state from reuse after it is left in an inconsistent state.
//!
//! # Example
//!
//! ```
//! # tokio_test::block_on(async {
//! use utils::poison::Poison;
//! use std::time::Duration;
//!
//! struct State {
//! clean: bool,
//! }
//! let state = tokio::sync::Mutex::new(Poison::new("mystate", State { clean: true }));
//!
//! let mut mutex_guard = state.lock().await;
//! let mut poison_guard = mutex_guard.check_and_arm()?;
//! let state = poison_guard.data_mut();
//! state.clean = false;
//! // If we get cancelled at this await point, subsequent check_and_arm() calls will fail.
//! tokio::time::sleep(Duration::from_secs(10)).await;
//! state.clean = true;
//! poison_guard.disarm();
//! # Ok::<(), utils::poison::Error>(())
//! # });
//! ```
use tracing::warn;
pub struct Poison<T> {
what: &'static str,
state: State,
data: T,
}
#[derive(Clone, Copy)]
enum State {
Clean,
Armed,
Poisoned { at: chrono::DateTime<chrono::Utc> },
}
impl<T> Poison<T> {
/// We log `what` `warning!` level if the [`Guard`] gets dropped without being [`Guard::disarm`]ed.
pub fn new(what: &'static str, data: T) -> Self {
Self {
what,
state: State::Clean,
data,
}
}
/// Check for poisoning and return a [`Guard`] that provides access to the wrapped state.
pub fn check_and_arm(&mut self) -> Result<Guard<T>, Error> {
match self.state {
State::Clean => {
self.state = State::Armed;
Ok(Guard(self))
}
State::Armed => unreachable!("transient state"),
State::Poisoned { at } => Err(Error::Poisoned {
what: self.what,
at,
}),
}
}
}
/// Exclusive access token for the state wrapped in a [`Poison`].
///
/// Use [`Self::data`] and [`Self::data_mut`] to access the wrapped state.
/// Once modifications are done, use [`Self::disarm`].
/// If [`Guard`] gets dropped instead of calling [`Self::disarm`], the state is poisoned
/// and subsequent calls to [`Poison::check_and_arm`] will fail with an error.
pub struct Guard<'a, T>(&'a mut Poison<T>);
impl<'a, T> Guard<'a, T> {
    /// Shared access to the wrapped state.
    pub fn data(&self) -> &T {
        &self.0.data
    }

    /// Exclusive access to the wrapped state.
    pub fn data_mut(&mut self) -> &mut T {
        &mut self.0.data
    }

    /// Declare the state consistent again. Consumes the guard, so the
    /// subsequent [`Drop`] observes `Clean` and does not poison.
    pub fn disarm(self) {
        match self.0.state {
            State::Armed => self.0.state = State::Clean,
            State::Clean => unreachable!("we set it to Armed in check_and_arm()"),
            State::Poisoned { at } => {
                unreachable!("we fail check_and_arm() if it's in that state: {at}")
            }
        }
    }
}
impl<'a, T> Drop for Guard<'a, T> {
    fn drop(&mut self) {
        match self.0.state {
            State::Armed => {
                // Dropped without disarm() — e.g. cancellation at an await
                // point — so record the poisoning and warn.
                let now = chrono::Utc::now();
                self.0.state = State::Poisoned { at: now };
                warn!(at=?now, "poisoning {}", self.0.what);
            }
            State::Clean => {
                // disarm() already ran; nothing to do.
            }
            State::Poisoned { at } => {
                unreachable!("we fail check_and_arm() if it's in that state: {at}")
            }
        }
    }
}
/// Error returned by [`Poison::check_and_arm`].
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// The state labeled `what` was poisoned at `at` because a [`Guard`]
    /// was dropped without being disarmed.
    #[error("poisoned at {at}: {what}")]
    Poisoned {
        what: &'static str,
        at: chrono::DateTime<chrono::Utc>,
    },
}

View File

@@ -2,10 +2,11 @@
use std::cmp::{Eq, Ordering}; use std::cmp::{Eq, Ordering};
use std::collections::BinaryHeap; use std::collections::BinaryHeap;
use std::fmt::Debug;
use std::mem; use std::mem;
use std::sync::Mutex; use std::sync::Mutex;
use std::time::Duration; use std::time::Duration;
use tokio::sync::watch::{self, channel}; use tokio::sync::watch::{channel, Receiver, Sender};
use tokio::time::timeout; use tokio::time::timeout;
/// An error happened while waiting for a number /// An error happened while waiting for a number
@@ -34,73 +35,23 @@ pub trait MonotonicCounter<V> {
fn cnt_value(&self) -> V; fn cnt_value(&self) -> V;
} }
/// Heap of waiters, lowest numbers pop first. /// Internal components of a `SeqWait`
struct Waiters<V> struct SeqWaitInt<S, V>
where where
S: MonotonicCounter<V>,
V: Ord, V: Ord,
{ {
heap: BinaryHeap<Waiter<V>>, waiters: BinaryHeap<Waiter<V>>,
/// Number of the first waiter in the heap, or None if there are no waiters. current: S,
status_channel: watch::Sender<Option<V>>, shutdown: bool,
}
impl<V> Waiters<V>
where
V: Ord + Copy,
{
fn new() -> Self {
Waiters {
heap: BinaryHeap::new(),
status_channel: channel(None).0,
}
}
/// `status_channel` contains the number of the first waiter in the heap.
/// This function should be called whenever waiters heap changes.
fn update_status(&self) {
let first_waiter = self.heap.peek().map(|w| w.wake_num);
let _ = self.status_channel.send_replace(first_waiter);
}
/// Add new waiter to the heap, return a channel that will be notified when the number arrives.
fn add(&mut self, num: V) -> watch::Receiver<()> {
let (tx, rx) = channel(());
self.heap.push(Waiter {
wake_num: num,
wake_channel: tx,
});
self.update_status();
rx
}
/// Pop all waiters <= num from the heap. Collect channels in a vector,
/// so that caller can wake them up.
fn pop_leq(&mut self, num: V) -> Vec<watch::Sender<()>> {
let mut wake_these = Vec::new();
while let Some(n) = self.heap.peek() {
if n.wake_num > num {
break;
}
wake_these.push(self.heap.pop().unwrap().wake_channel);
}
self.update_status();
wake_these
}
/// Used on shutdown to efficiently drop all waiters.
fn take_all(&mut self) -> BinaryHeap<Waiter<V>> {
let heap = mem::take(&mut self.heap);
self.update_status();
heap
}
} }
struct Waiter<T> struct Waiter<T>
where where
T: Ord, T: Ord,
{ {
wake_num: T, // wake me when this number arrives ... wake_num: T, // wake me when this number arrives ...
wake_channel: watch::Sender<()>, // ... by sending a message to this channel wake_channel: Sender<()>, // ... by sending a message to this channel
} }
// BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here // BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
@@ -125,17 +76,6 @@ impl<T: Ord> PartialEq for Waiter<T> {
impl<T: Ord> Eq for Waiter<T> {} impl<T: Ord> Eq for Waiter<T> {}
/// Internal components of a `SeqWait`
struct SeqWaitInt<S, V>
where
S: MonotonicCounter<V>,
V: Ord,
{
waiters: Waiters<V>,
current: S,
shutdown: bool,
}
/// A tool for waiting on a sequence number /// A tool for waiting on a sequence number
/// ///
/// This provides a way to wait the arrival of a number. /// This provides a way to wait the arrival of a number.
@@ -168,7 +108,7 @@ where
/// Create a new `SeqWait`, initialized to a particular number /// Create a new `SeqWait`, initialized to a particular number
pub fn new(starting_num: S) -> Self { pub fn new(starting_num: S) -> Self {
let internal = SeqWaitInt { let internal = SeqWaitInt {
waiters: Waiters::new(), waiters: BinaryHeap::new(),
current: starting_num, current: starting_num,
shutdown: false, shutdown: false,
}; };
@@ -188,8 +128,9 @@ where
// Block any future waiters from starting // Block any future waiters from starting
internal.shutdown = true; internal.shutdown = true;
// Take all waiters to drop them later. // This will steal the entire waiters map.
internal.waiters.take_all() // When we drop it all waiters will be woken.
mem::take(&mut internal.waiters)
// Drop the lock as we exit this scope. // Drop the lock as we exit this scope.
}; };
@@ -241,21 +182,9 @@ where
} }
} }
/// Check if [`Self::wait_for`] or [`Self::wait_for_timeout`] would wait if called with `num`.
pub fn would_wait_for(&self, num: V) -> Result<(), V> {
let internal = self.internal.lock().unwrap();
let cnt = internal.current.cnt_value();
drop(internal);
if cnt >= num {
Ok(())
} else {
Err(cnt)
}
}
/// Register and return a channel that will be notified when a number arrives, /// Register and return a channel that will be notified when a number arrives,
/// or None, if it has already arrived. /// or None, if it has already arrived.
fn queue_for_wait(&self, num: V) -> Result<Option<watch::Receiver<()>>, SeqWaitError> { fn queue_for_wait(&self, num: V) -> Result<Option<Receiver<()>>, SeqWaitError> {
let mut internal = self.internal.lock().unwrap(); let mut internal = self.internal.lock().unwrap();
if internal.current.cnt_value() >= num { if internal.current.cnt_value() >= num {
return Ok(None); return Ok(None);
@@ -264,8 +193,12 @@ where
return Err(SeqWaitError::Shutdown); return Err(SeqWaitError::Shutdown);
} }
// Add waiter channel to the queue. // Create a new channel.
let rx = internal.waiters.add(num); let (tx, rx) = channel(());
internal.waiters.push(Waiter {
wake_num: num,
wake_channel: tx,
});
// Drop the lock as we exit this scope. // Drop the lock as we exit this scope.
Ok(Some(rx)) Ok(Some(rx))
} }
@@ -286,8 +219,16 @@ where
} }
internal.current.cnt_advance(num); internal.current.cnt_advance(num);
// Pop all waiters <= num from the heap. // Pop all waiters <= num from the heap. Collect them in a vector, and
internal.waiters.pop_leq(num) // wake them up after releasing the lock.
let mut wake_these = Vec::new();
while let Some(n) = internal.waiters.peek() {
if n.wake_num > num {
break;
}
wake_these.push(internal.waiters.pop().unwrap().wake_channel);
}
wake_these
}; };
for tx in wake_these { for tx in wake_these {
@@ -302,23 +243,6 @@ where
pub fn load(&self) -> S { pub fn load(&self) -> S {
self.internal.lock().unwrap().current self.internal.lock().unwrap().current
} }
/// Get a Receiver for the current status.
///
/// The current status is the number of the first waiter in the queue,
/// or None if there are no waiters.
///
/// This receiver will be notified whenever the status changes.
/// It is useful for receiving notifications when the first waiter
/// starts waiting for a number, or when there are no more waiters left.
pub fn status_receiver(&self) -> watch::Receiver<Option<V>> {
self.internal
.lock()
.unwrap()
.waiters
.status_channel
.subscribe()
}
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -1,55 +0,0 @@
//! A `serde::{Deserialize,Serialize}` type for SystemTime with RFC3339 format and millisecond precision.
/// Transparent newtype around [`std::time::SystemTime`] that (de)serializes
/// as an RFC3339 timestamp string with millisecond precision.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
#[serde(transparent)]
pub struct SystemTime(
    // Custom (de)serializers route through humantime's RFC3339-millis format.
    #[serde(
        deserialize_with = "deser_rfc3339_millis",
        serialize_with = "ser_rfc3339_millis"
    )]
    pub std::time::SystemTime,
);
/// Serialize `ts` as an RFC3339 string truncated to millisecond precision.
fn ser_rfc3339_millis<S: serde::ser::Serializer>(
    ts: &std::time::SystemTime,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    let formatted = humantime::format_rfc3339_millis(*ts);
    serializer.collect_str(&formatted)
}
fn deser_rfc3339_millis<'de, D>(deserializer: D) -> Result<std::time::SystemTime, D::Error>
where
D: serde::de::Deserializer<'de>,
{
let s: String = serde::de::Deserialize::deserialize(deserializer)?;
humantime::parse_rfc3339(&s).map_err(serde::de::Error::custom)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Truncate a `SystemTime` to millisecond precision by dropping any
    /// sub-millisecond nanoseconds.
    fn to_millisecond_precision(time: SystemTime) -> SystemTime {
        match time.0.duration_since(std::time::SystemTime::UNIX_EPOCH) {
            // Pre-epoch times can't be normalized this way; return unchanged.
            Err(_) => time,
            Ok(since_epoch) => {
                let millis =
                    since_epoch.as_secs() * 1_000 + u64::from(since_epoch.subsec_millis());
                SystemTime(
                    std::time::SystemTime::UNIX_EPOCH
                        + std::time::Duration::from_millis(millis),
                )
            }
        }
    }

    #[test]
    fn test_serialize_deserialize() {
        let input = SystemTime(std::time::SystemTime::now());
        // Serialization must match humantime's millis formatting (quoted for JSON)...
        let expected_serialized = format!("\"{}\"", humantime::format_rfc3339_millis(input.0));
        let serialized = serde_json::to_string(&input).unwrap();
        assert_eq!(expected_serialized, serialized);
        // ...and parsing that string back must equal the input truncated to millis.
        let deserialized: SystemTime = serde_json::from_str(&expected_serialized).unwrap();
        assert_eq!(to_millisecond_precision(input), deserialized);
    }
}

View File

@@ -192,14 +192,6 @@ impl<T> OnceCell<T> {
} }
} }
/// Like [`Guard::take_and_deinit`], but will return `None` if this OnceCell was never
/// initialized.
pub fn take_and_deinit(&mut self) -> Option<(T, InitPermit)> {
let inner = self.inner.get_mut().unwrap();
inner.take_and_deinit()
}
/// Return the number of [`Self::get_or_init`] calls waiting for initialization to complete. /// Return the number of [`Self::get_or_init`] calls waiting for initialization to complete.
pub fn initializer_count(&self) -> usize { pub fn initializer_count(&self) -> usize {
self.initializers.load(Ordering::Relaxed) self.initializers.load(Ordering::Relaxed)
@@ -254,23 +246,15 @@ impl<'a, T> Guard<'a, T> {
/// The permit will be on a semaphore part of the new internal value, and any following /// The permit will be on a semaphore part of the new internal value, and any following
/// [`OnceCell::get_or_init`] will wait on it to complete. /// [`OnceCell::get_or_init`] will wait on it to complete.
pub fn take_and_deinit(mut self) -> (T, InitPermit) { pub fn take_and_deinit(mut self) -> (T, InitPermit) {
self.0
.take_and_deinit()
.expect("guard is not created unless value has been initialized")
}
}
impl<T> Inner<T> {
pub fn take_and_deinit(&mut self) -> Option<(T, InitPermit)> {
let value = self.value.take()?;
let mut swapped = Inner::default(); let mut swapped = Inner::default();
let sem = swapped.init_semaphore.clone(); let sem = swapped.init_semaphore.clone();
// acquire and forget right away, moving the control over to InitPermit // acquire and forget right away, moving the control over to InitPermit
sem.try_acquire().expect("we just created this").forget(); sem.try_acquire().expect("we just created this").forget();
let permit = InitPermit(sem); std::mem::swap(&mut *self.0, &mut swapped);
std::mem::swap(self, &mut swapped); swapped
Some((value, permit)) .value
.map(|v| (v, InitPermit(sem)))
.expect("guard is not created unless value has been initialized")
} }
} }
@@ -279,13 +263,6 @@ impl<T> Inner<T> {
/// On drop, this type will return the permit. /// On drop, this type will return the permit.
pub struct InitPermit(Arc<tokio::sync::Semaphore>); pub struct InitPermit(Arc<tokio::sync::Semaphore>);
impl std::fmt::Debug for InitPermit {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let ptr = Arc::as_ptr(&self.0) as *const ();
f.debug_tuple("InitPermit").field(&ptr).finish()
}
}
impl Drop for InitPermit { impl Drop for InitPermit {
fn drop(&mut self) { fn drop(&mut self) {
assert_eq!( assert_eq!(
@@ -582,22 +559,4 @@ mod tests {
assert_eq!(*target.get().unwrap(), 11); assert_eq!(*target.get().unwrap(), 11);
} }
#[tokio::test]
async fn take_and_deinit_on_mut() {
use std::convert::Infallible;
let mut target = OnceCell::<u32>::default();
assert!(target.take_and_deinit().is_none());
target
.get_or_init(|permit| async move { Ok::<_, Infallible>((42, permit)) })
.await
.unwrap();
let again = target.take_and_deinit();
assert!(matches!(again, Some((42, _))), "{again:?}");
assert!(target.take_and_deinit().is_none());
}
} }

View File

@@ -70,7 +70,6 @@ tokio-stream.workspace = true
tokio-util.workspace = true tokio-util.workspace = true
toml_edit = { workspace = true, features = [ "serde" ] } toml_edit = { workspace = true, features = [ "serde" ] }
tracing.workspace = true tracing.workspace = true
twox-hash.workspace = true
url.workspace = true url.workspace = true
walkdir.workspace = true walkdir.workspace = true
metrics.workspace = true metrics.workspace = true

View File

@@ -27,50 +27,30 @@
//! //!
//! # Reference Numbers //! # Reference Numbers
//! //!
//! 2024-04-15 on i3en.3xlarge //! 2024-03-20 on i3en.3xlarge
//! //!
//! ```text //! ```text
//! async-short/1 time: [24.584 µs 24.737 µs 24.922 µs] //! short/1 time: [26.483 µs 26.614 µs 26.767 µs]
//! async-short/2 time: [33.479 µs 33.660 µs 33.888 µs] //! short/2 time: [32.223 µs 32.465 µs 32.767 µs]
//! async-short/4 time: [42.713 µs 43.046 µs 43.440 µs] //! short/4 time: [47.203 µs 47.583 µs 47.984 µs]
//! async-short/8 time: [71.814 µs 72.478 µs 73.240 µs] //! short/8 time: [89.135 µs 89.612 µs 90.139 µs]
//! async-short/16 time: [132.73 µs 134.45 µs 136.22 µs] //! short/16 time: [190.12 µs 191.52 µs 192.88 µs]
//! async-short/32 time: [258.31 µs 260.73 µs 263.27 µs] //! short/32 time: [380.96 µs 382.63 µs 384.20 µs]
//! async-short/64 time: [511.61 µs 514.44 µs 517.51 µs] //! short/64 time: [736.86 µs 741.07 µs 745.03 µs]
//! async-short/128 time: [992.64 µs 998.23 µs 1.0042 ms] //! short/128 time: [1.4106 ms 1.4206 ms 1.4294 ms]
//! async-medium/1 time: [110.11 µs 110.50 µs 110.96 µs] //! medium/1 time: [111.81 µs 112.25 µs 112.79 µs]
//! async-medium/2 time: [153.06 µs 153.85 µs 154.99 µs] //! medium/2 time: [158.26 µs 159.13 µs 160.21 µs]
//! async-medium/4 time: [317.51 µs 319.92 µs 322.85 µs] //! medium/4 time: [334.65 µs 337.14 µs 340.07 µs]
//! async-medium/8 time: [638.30 µs 644.68 µs 652.12 µs] //! medium/8 time: [675.32 µs 679.91 µs 685.25 µs]
//! async-medium/16 time: [1.2651 ms 1.2773 ms 1.2914 ms] //! medium/16 time: [1.2929 ms 1.2996 ms 1.3067 ms]
//! async-medium/32 time: [2.5117 ms 2.5410 ms 2.5720 ms] //! medium/32 time: [2.4295 ms 2.4461 ms 2.4623 ms]
//! async-medium/64 time: [4.8088 ms 4.8555 ms 4.9047 ms] //! medium/64 time: [4.3973 ms 4.4458 ms 4.4875 ms]
//! async-medium/128 time: [8.8311 ms 8.9849 ms 9.1263 ms] //! medium/128 time: [7.5955 ms 7.7847 ms 7.9481 ms]
//! sync-short/1 time: [25.503 µs 25.626 µs 25.771 µs]
//! sync-short/2 time: [30.850 µs 31.013 µs 31.208 µs]
//! sync-short/4 time: [45.543 µs 45.856 µs 46.193 µs]
//! sync-short/8 time: [84.114 µs 84.639 µs 85.220 µs]
//! sync-short/16 time: [185.22 µs 186.15 µs 187.13 µs]
//! sync-short/32 time: [377.43 µs 378.87 µs 380.46 µs]
//! sync-short/64 time: [756.49 µs 759.04 µs 761.70 µs]
//! sync-short/128 time: [1.4825 ms 1.4874 ms 1.4923 ms]
//! sync-medium/1 time: [105.66 µs 106.01 µs 106.43 µs]
//! sync-medium/2 time: [153.10 µs 153.84 µs 154.72 µs]
//! sync-medium/4 time: [327.13 µs 329.44 µs 332.27 µs]
//! sync-medium/8 time: [654.26 µs 658.73 µs 663.63 µs]
//! sync-medium/16 time: [1.2682 ms 1.2748 ms 1.2816 ms]
//! sync-medium/32 time: [2.4456 ms 2.4595 ms 2.4731 ms]
//! sync-medium/64 time: [4.6523 ms 4.6890 ms 4.7256 ms]
//! sync-medium/128 time: [8.7215 ms 8.8323 ms 8.9344 ms]
//! ``` //! ```
use bytes::{Buf, Bytes}; use bytes::{Buf, Bytes};
use criterion::{BenchmarkId, Criterion}; use criterion::{BenchmarkId, Criterion};
use pageserver::{ use pageserver::{config::PageServerConf, walrecord::NeonWalRecord, walredo::PostgresRedoManager};
config::PageServerConf,
walrecord::NeonWalRecord,
walredo::{PostgresRedoManager, ProcessKind},
};
use pageserver_api::{key::Key, shard::TenantShardId}; use pageserver_api::{key::Key, shard::TenantShardId};
use std::{ use std::{
sync::Arc, sync::Arc,
@@ -80,39 +60,33 @@ use tokio::{sync::Barrier, task::JoinSet};
use utils::{id::TenantId, lsn::Lsn}; use utils::{id::TenantId, lsn::Lsn};
fn bench(c: &mut Criterion) { fn bench(c: &mut Criterion) {
for process_kind in &[ProcessKind::Async, ProcessKind::Sync] { {
{ let nclients = [1, 2, 4, 8, 16, 32, 64, 128];
let nclients = [1, 2, 4, 8, 16, 32, 64, 128]; for nclients in nclients {
for nclients in nclients { let mut group = c.benchmark_group("short");
let mut group = c.benchmark_group(format!("{process_kind}-short")); group.bench_with_input(
group.bench_with_input( BenchmarkId::from_parameter(nclients),
BenchmarkId::from_parameter(nclients), &nclients,
&nclients, |b, nclients| {
|b, nclients| { let redo_work = Arc::new(Request::short_input());
let redo_work = Arc::new(Request::short_input()); b.iter_custom(|iters| bench_impl(Arc::clone(&redo_work), iters, *nclients));
b.iter_custom(|iters| { },
bench_impl(*process_kind, Arc::clone(&redo_work), iters, *nclients) );
});
},
);
}
} }
}
{ {
let nclients = [1, 2, 4, 8, 16, 32, 64, 128]; let nclients = [1, 2, 4, 8, 16, 32, 64, 128];
for nclients in nclients { for nclients in nclients {
let mut group = c.benchmark_group(format!("{process_kind}-medium")); let mut group = c.benchmark_group("medium");
group.bench_with_input( group.bench_with_input(
BenchmarkId::from_parameter(nclients), BenchmarkId::from_parameter(nclients),
&nclients, &nclients,
|b, nclients| { |b, nclients| {
let redo_work = Arc::new(Request::medium_input()); let redo_work = Arc::new(Request::medium_input());
b.iter_custom(|iters| { b.iter_custom(|iters| bench_impl(Arc::clone(&redo_work), iters, *nclients));
bench_impl(*process_kind, Arc::clone(&redo_work), iters, *nclients) },
}); );
},
);
}
} }
} }
} }
@@ -120,16 +94,10 @@ criterion::criterion_group!(benches, bench);
criterion::criterion_main!(benches); criterion::criterion_main!(benches);
// Returns the sum of each client's wall-clock time spent executing their share of the n_redos. // Returns the sum of each client's wall-clock time spent executing their share of the n_redos.
fn bench_impl( fn bench_impl(redo_work: Arc<Request>, n_redos: u64, nclients: u64) -> Duration {
process_kind: ProcessKind,
redo_work: Arc<Request>,
n_redos: u64,
nclients: u64,
) -> Duration {
let repo_dir = camino_tempfile::tempdir_in(env!("CARGO_TARGET_TMPDIR")).unwrap(); let repo_dir = camino_tempfile::tempdir_in(env!("CARGO_TARGET_TMPDIR")).unwrap();
let mut conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf()); let conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf());
conf.walredo_process_kind = process_kind;
let conf = Box::leak(Box::new(conf)); let conf = Box::leak(Box::new(conf));
let tenant_shard_id = TenantShardId::unsharded(TenantId::generate()); let tenant_shard_id = TenantShardId::unsharded(TenantId::generate());
@@ -145,40 +113,25 @@ fn bench_impl(
let manager = PostgresRedoManager::new(conf, tenant_shard_id); let manager = PostgresRedoManager::new(conf, tenant_shard_id);
let manager = Arc::new(manager); let manager = Arc::new(manager);
// divide the amount of work equally among the clients.
let nredos_per_client = n_redos / nclients;
for _ in 0..nclients { for _ in 0..nclients {
rt.block_on(async { rt.block_on(async {
tasks.spawn(client( tasks.spawn(client(
Arc::clone(&manager), Arc::clone(&manager),
Arc::clone(&start), Arc::clone(&start),
Arc::clone(&redo_work), Arc::clone(&redo_work),
nredos_per_client, // divide the amount of work equally among the clients
n_redos / nclients,
)) ))
}); });
} }
let elapsed = rt.block_on(async move { rt.block_on(async move {
let mut total_wallclock_time = Duration::ZERO; let mut total_wallclock_time = std::time::Duration::from_millis(0);
while let Some(res) = tasks.join_next().await { while let Some(res) = tasks.join_next().await {
total_wallclock_time += res.unwrap(); total_wallclock_time += res.unwrap();
} }
total_wallclock_time total_wallclock_time
}); })
// consistency check to ensure process kind setting worked
if nredos_per_client > 0 {
assert_eq!(
manager
.status()
.process
.map(|p| p.kind)
.expect("the benchmark work causes a walredo process to be spawned"),
std::borrow::Cow::Borrowed(process_kind.into())
);
}
elapsed
} }
async fn client( async fn client(

View File

@@ -128,12 +128,12 @@ impl Client {
pub async fn timeline_info( pub async fn timeline_info(
&self, &self,
tenant_shard_id: TenantShardId, tenant_id: TenantId,
timeline_id: TimelineId, timeline_id: TimelineId,
force_await_logical_size: ForceAwaitLogicalSize, force_await_logical_size: ForceAwaitLogicalSize,
) -> Result<pageserver_api::models::TimelineInfo> { ) -> Result<pageserver_api::models::TimelineInfo> {
let uri = format!( let uri = format!(
"{}/v1/tenant/{tenant_shard_id}/timeline/{timeline_id}", "{}/v1/tenant/{tenant_id}/timeline/{timeline_id}",
self.mgmt_api_endpoint self.mgmt_api_endpoint
); );
@@ -151,11 +151,11 @@ impl Client {
pub async fn keyspace( pub async fn keyspace(
&self, &self,
tenant_shard_id: TenantShardId, tenant_id: TenantId,
timeline_id: TimelineId, timeline_id: TimelineId,
) -> Result<pageserver_api::models::partitioning::Partitioning> { ) -> Result<pageserver_api::models::partitioning::Partitioning> {
let uri = format!( let uri = format!(
"{}/v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/keyspace", "{}/v1/tenant/{tenant_id}/timeline/{timeline_id}/keyspace",
self.mgmt_api_endpoint self.mgmt_api_endpoint
); );
self.get(&uri) self.get(&uri)
@@ -243,19 +243,6 @@ impl Client {
Ok(()) Ok(())
} }
pub async fn tenant_scan_remote_storage(
&self,
tenant_id: TenantId,
) -> Result<TenantScanRemoteStorageResponse> {
let uri = format!(
"{}/v1/tenant/{tenant_id}/scan_remote_storage",
self.mgmt_api_endpoint
);
let response = self.request(Method::GET, &uri, ()).await?;
let body = response.json().await.map_err(Error::ReceiveBody)?;
Ok(body)
}
pub async fn tenant_config(&self, req: &TenantConfigRequest) -> Result<()> { pub async fn tenant_config(&self, req: &TenantConfigRequest) -> Result<()> {
let uri = format!("{}/v1/tenant/config", self.mgmt_api_endpoint); let uri = format!("{}/v1/tenant/config", self.mgmt_api_endpoint);
self.request(Method::PUT, &uri, req).await?; self.request(Method::PUT, &uri, req).await?;
@@ -291,7 +278,10 @@ impl Client {
flush_ms: Option<std::time::Duration>, flush_ms: Option<std::time::Duration>,
lazy: bool, lazy: bool,
) -> Result<()> { ) -> Result<()> {
let req_body = TenantLocationConfigRequest { config }; let req_body = TenantLocationConfigRequest {
tenant_id: Some(tenant_shard_id),
config,
};
let mut path = reqwest::Url::parse(&format!( let mut path = reqwest::Url::parse(&format!(
"{}/v1/tenant/{}/location_config", "{}/v1/tenant/{}/location_config",

View File

@@ -60,7 +60,7 @@ impl Client {
) -> anyhow::Result<PagestreamClient> { ) -> anyhow::Result<PagestreamClient> {
let copy_both: tokio_postgres::CopyBothDuplex<bytes::Bytes> = self let copy_both: tokio_postgres::CopyBothDuplex<bytes::Bytes> = self
.client .client
.copy_both_simple(&format!("pagestream_v2 {tenant_id} {timeline_id}")) .copy_both_simple(&format!("pagestream {tenant_id} {timeline_id}"))
.await?; .await?;
let Client { let Client {
cancel_on_client_drop, cancel_on_client_drop,

View File

@@ -11,6 +11,7 @@ default = []
anyhow.workspace = true anyhow.workspace = true
async-compression.workspace = true async-compression.workspace = true
async-stream.workspace = true async-stream.workspace = true
async-trait.workspace = true
byteorder.workspace = true byteorder.workspace = true
bytes.workspace = true bytes.workspace = true
chrono = { workspace = true, features = ["serde"] } chrono = { workspace = true, features = ["serde"] }

View File

@@ -18,7 +18,6 @@
//! database size. For example, if the logical database size is 10 GB, we would //! database size. For example, if the logical database size is 10 GB, we would
//! generate new image layers every 10 GB of WAL. //! generate new image layers every 10 GB of WAL.
use futures::StreamExt; use futures::StreamExt;
use pageserver_api::shard::ShardIdentity;
use tracing::{debug, info}; use tracing::{debug, info};
use std::collections::{HashSet, VecDeque}; use std::collections::{HashSet, VecDeque};
@@ -44,8 +43,7 @@ pub async fn compact_tiered<E: CompactionJobExecutor>(
fanout: u64, fanout: u64,
ctx: &E::RequestContext, ctx: &E::RequestContext,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
assert!(fanout >= 1, "fanout needs to be at least 1 but is {fanout}"); assert!(fanout >= 2);
let exp_base = fanout.max(2);
// Start at L0 // Start at L0
let mut current_level_no = 0; let mut current_level_no = 0;
let mut current_level_target_height = target_file_size; let mut current_level_target_height = target_file_size;
@@ -108,7 +106,7 @@ pub async fn compact_tiered<E: CompactionJobExecutor>(
break; break;
} }
current_level_no += 1; current_level_no += 1;
current_level_target_height = current_level_target_height.saturating_mul(exp_base); current_level_target_height = current_level_target_height.saturating_mul(fanout);
} }
Ok(()) Ok(())
} }
@@ -126,7 +124,6 @@ async fn compact_level<E: CompactionJobExecutor>(
} }
let mut state = LevelCompactionState { let mut state = LevelCompactionState {
shard_identity: *executor.get_shard_identity(),
target_file_size, target_file_size,
_lsn_range: lsn_range.clone(), _lsn_range: lsn_range.clone(),
layers: layer_fragments, layers: layer_fragments,
@@ -166,8 +163,6 @@ struct LevelCompactionState<'a, E>
where where
E: CompactionJobExecutor, E: CompactionJobExecutor,
{ {
shard_identity: ShardIdentity,
// parameters // parameters
target_file_size: u64, target_file_size: u64,
@@ -370,7 +365,6 @@ where
.executor .executor
.get_keyspace(&job.key_range, job.lsn_range.end, ctx) .get_keyspace(&job.key_range, job.lsn_range.end, ctx)
.await?, .await?,
&self.shard_identity,
) * 8192; ) * 8192;
let wal_size = job let wal_size = job
@@ -435,7 +429,7 @@ where
keyspace, keyspace,
self.target_file_size / 8192, self.target_file_size / 8192,
); );
while let Some(key_range) = window.choose_next_image(&self.shard_identity) { while let Some(key_range) = window.choose_next_image() {
new_jobs.push(CompactionJob::<E> { new_jobs.push(CompactionJob::<E> {
key_range, key_range,
lsn_range: job.lsn_range.clone(), lsn_range: job.lsn_range.clone(),
@@ -628,12 +622,7 @@ impl<K: CompactionKey> KeyspaceWindowPos<K> {
} }
// Advance the cursor until it reaches 'target_keysize'. // Advance the cursor until it reaches 'target_keysize'.
fn advance_until_size( fn advance_until_size(&mut self, w: &KeyspaceWindowHead<K>, max_size: u64) {
&mut self,
w: &KeyspaceWindowHead<K>,
max_size: u64,
shard_identity: &ShardIdentity,
) {
while self.accum_keysize < max_size && !self.reached_end(w) { while self.accum_keysize < max_size && !self.reached_end(w) {
let curr_range = &w.keyspace[self.keyspace_idx]; let curr_range = &w.keyspace[self.keyspace_idx];
if self.end_key < curr_range.start { if self.end_key < curr_range.start {
@@ -642,7 +631,7 @@ impl<K: CompactionKey> KeyspaceWindowPos<K> {
} }
// We're now within 'curr_range'. Can we advance past it completely? // We're now within 'curr_range'. Can we advance past it completely?
let distance = K::key_range_size(&(self.end_key..curr_range.end), shard_identity); let distance = K::key_range_size(&(self.end_key..curr_range.end));
if (self.accum_keysize + distance as u64) < max_size { if (self.accum_keysize + distance as u64) < max_size {
// oh yeah, it fits // oh yeah, it fits
self.end_key = curr_range.end; self.end_key = curr_range.end;
@@ -651,7 +640,7 @@ impl<K: CompactionKey> KeyspaceWindowPos<K> {
} else { } else {
// advance within the range // advance within the range
let skip_key = self.end_key.skip_some(); let skip_key = self.end_key.skip_some();
let distance = K::key_range_size(&(self.end_key..skip_key), shard_identity); let distance = K::key_range_size(&(self.end_key..skip_key));
if (self.accum_keysize + distance as u64) < max_size { if (self.accum_keysize + distance as u64) < max_size {
self.end_key = skip_key; self.end_key = skip_key;
self.accum_keysize += distance as u64; self.accum_keysize += distance as u64;
@@ -687,7 +676,7 @@ where
} }
} }
fn choose_next_image(&mut self, shard_identity: &ShardIdentity) -> Option<Range<K>> { fn choose_next_image(&mut self) -> Option<Range<K>> {
if self.start_pos.keyspace_idx == self.head.keyspace.len() { if self.start_pos.keyspace_idx == self.head.keyspace.len() {
// we've reached the end // we've reached the end
return None; return None;
@@ -697,7 +686,6 @@ where
next_pos.advance_until_size( next_pos.advance_until_size(
&self.head, &self.head,
self.start_pos.accum_keysize + self.head.target_keysize, self.start_pos.accum_keysize + self.head.target_keysize,
shard_identity,
); );
// See if we can gobble up the rest of the keyspace if we stretch out the layer, up to // See if we can gobble up the rest of the keyspace if we stretch out the layer, up to
@@ -706,7 +694,6 @@ where
end_pos.advance_until_size( end_pos.advance_until_size(
&self.head, &self.head,
self.start_pos.accum_keysize + (self.head.target_keysize * 5 / 4), self.start_pos.accum_keysize + (self.head.target_keysize * 5 / 4),
shard_identity,
); );
if end_pos.reached_end(&self.head) { if end_pos.reached_end(&self.head) {
// gobble up any unused keyspace between the last used key and end of the range // gobble up any unused keyspace between the last used key and end of the range

View File

@@ -5,7 +5,6 @@ use crate::interface::*;
use futures::future::BoxFuture; use futures::future::BoxFuture;
use futures::{Stream, StreamExt}; use futures::{Stream, StreamExt};
use itertools::Itertools; use itertools::Itertools;
use pageserver_api::shard::ShardIdentity;
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
use std::collections::BinaryHeap; use std::collections::BinaryHeap;
use std::collections::VecDeque; use std::collections::VecDeque;
@@ -14,17 +13,11 @@ use std::ops::{DerefMut, Range};
use std::pin::Pin; use std::pin::Pin;
use std::task::{ready, Poll}; use std::task::{ready, Poll};
pub fn keyspace_total_size<K>( pub fn keyspace_total_size<K>(keyspace: &CompactionKeySpace<K>) -> u64
keyspace: &CompactionKeySpace<K>,
shard_identity: &ShardIdentity,
) -> u64
where where
K: CompactionKey, K: CompactionKey,
{ {
keyspace keyspace.iter().map(|r| K::key_range_size(r) as u64).sum()
.iter()
.map(|r| K::key_range_size(r, shard_identity) as u64)
.sum()
} }
pub fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool { pub fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
@@ -187,7 +180,7 @@ where
match top.deref_mut() { match top.deref_mut() {
LazyLoadLayer::Unloaded(ref mut l) => { LazyLoadLayer::Unloaded(ref mut l) => {
let fut = l.load_keys(this.ctx); let fut = l.load_keys(this.ctx);
this.load_future.set(Some(Box::pin(fut))); this.load_future.set(Some(fut));
continue; continue;
} }
LazyLoadLayer::Loaded(ref mut entries) => { LazyLoadLayer::Loaded(ref mut entries) => {

Some files were not shown because too many files have changed in this diff Show More