Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-25 22:30:38 +00:00)

Compare commits: 66 commits, persistent ... im_layer_m
| Author | SHA1 | Date |
|---|---|---|
| | c50e13d9d5 | |
| | db72f432e5 | |
| | 5a6284f6f8 | |
| | c11c19d568 | |
| | d9a239475c | |
| | a4311bd961 | |
| | e0f23242be | |
| | 0de1ec14e0 | |
| | 0053ccac13 | |
| | b6686d63b0 | |
| | edf8a08dcc | |
| | b51d766a44 | |
| | c9b1655885 | |
| | 206ddec636 | |
| | cb44698c47 | |
| | af325a30db | |
| | be499156e5 | |
| | 4a11f6e2de | |
| | d9190aae87 | |
| | 6bce11e810 | |
| | 39003aa9f3 | |
| | a8f0d27c92 | |
| | fe280f70aa | |
| | faf1d20e6a | |
| | d9ab42013f | |
| | edfebad3a1 | |
| | b9544adcb4 | |
| | ebb51f16e0 | |
| | 136b029d7a | |
| | 33834c01ec | |
| | 9a6c0be823 | |
| | baa8d5a16a | |
| | fbd5f65938 | |
| | 1f1324ebed | |
| | fb633b16ac | |
| | f277140234 | |
| | 52166799bd | |
| | 0a4e5f8aa3 | |
| | 0c1195c30d | |
| | 3ba92d238e | |
| | 67469339fa | |
| | 0205a44265 | |
| | 480175852f | |
| | 9fdd228dee | |
| | 15db566420 | |
| | 1a316a264d | |
| | aeeb782342 | |
| | ae53dc3326 | |
| | 1ca76776d0 | |
| | 10d554fcbb | |
| | 2ce5d8137d | |
| | a406783098 | |
| | e6db4b63eb | |
| | 0b0cb77da4 | |
| | 47734fdb0a | |
| | 9c886ac0a0 | |
| | b6989e8928 | |
| | 46ea2a8e96 | |
| | 5bca7713c1 | |
| | 99d9c23df5 | |
| | 05db6458df | |
| | 2d42f84389 | |
| | aee3eb6d19 | |
| | a6e4a3c3ef | |
| | 21ec28d9bc | |
| | de8f24583f | |
@@ -14,6 +14,7 @@
!pgxn/
!proxy/
!safekeeper/
!storage_broker/
!vendor/postgres-v14/
!vendor/postgres-v15/
!workspace_hack/

39  .github/actions/allure-report/action.yml (vendored)
@@ -32,8 +32,8 @@ runs:
exit 2
fi

- name: Calculate key
id: calculate-key
- name: Calculate variables
id: calculate-vars
shell: bash -euxo pipefail {0}
run: |
# TODO: for manually triggered workflows (via workflow_dispatch) we need to have a separate key
@@ -41,14 +41,22 @@ runs:
pr_number=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
if [ "${pr_number}" != "null" ]; then
key=pr-${pr_number}
elif [ "${GITHUB_REF}" = "refs/heads/main" ]; then
elif [ "${GITHUB_REF_NAME}" = "main" ]; then
# Shortcut for a special branch
key=main
elif [ "${GITHUB_REF_NAME}" = "release" ]; then
# Shortcut for a special branch
key=release
else
key=branch-$(echo ${GITHUB_REF#refs/heads/} | tr -c "[:alnum:]._-" "-")
key=branch-$(printf "${GITHUB_REF_NAME}" | tr -c "[:alnum:]._-" "-")
fi
echo "KEY=${key}" >> $GITHUB_OUTPUT

# Sanitize test selection to remove `/` and any other special characters
# Use printf instead of echo to avoid having `\n` at the end of the string
test_selection=$(printf "${{ inputs.test_selection }}" | tr -c "[:alnum:]._-" "-" )
echo "TEST_SELECTION=${test_selection}" >> $GITHUB_OUTPUT
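To illustrate what the sanitization in the hunk above produces (a sketch with made-up inputs, not part of the workflow): `tr -c "[:alnum:]._-" "-"` replaces every character outside the allowed set with a dash, so branch names and test selections become safe key components.

```bash
# Hypothetical inputs, shown only to demonstrate the tr-based sanitization above.
printf 'performance/test_perf_olap.py' | tr -c '[:alnum:]._-' '-'
# -> performance-test_perf_olap.py

printf 'feature/my branch!' | tr -c '[:alnum:]._-' '-'
# -> feature-my-branch-
```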
- uses: actions/setup-java@v3
if: ${{ inputs.action == 'generate' }}
with:
@@ -74,10 +82,11 @@ runs:
- name: Upload Allure results
if: ${{ inputs.action == 'store' }}
env:
REPORT_PREFIX: reports/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
RAW_PREFIX: reports-raw/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
TEST_OUTPUT: /tmp/test_output
BUCKET: neon-github-public-dev
TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
shell: bash -euxo pipefail {0}
run: |
# Add metadata
@@ -98,7 +107,7 @@ runs:
BUILD_TYPE=${{ inputs.build_type }}
EOF

ARCHIVE="${GITHUB_RUN_ID}-${{ inputs.test_selection }}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
ARCHIVE="${GITHUB_RUN_ID}-${TEST_SELECTION}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
ZSTD_NBTHREADS=0

tar -C ${TEST_OUTPUT}/allure/results -cf ${ARCHIVE} --zstd .
@@ -109,8 +118,9 @@ runs:
if: ${{ inputs.action == 'generate' }}
shell: bash -euxo pipefail {0}
env:
LOCK_FILE: reports/${{ steps.calculate-key.outputs.KEY }}/lock.txt
LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
BUCKET: neon-github-public-dev
TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
run: |
LOCK_TIMEOUT=300 # seconds

@@ -123,12 +133,12 @@ runs:
fi
sleep 1
done
echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" > lock.txt
echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" > lock.txt
aws s3 mv --only-show-errors lock.txt "s3://${BUCKET}/${LOCK_FILE}"

# A double-check that exactly WE have acquired the lock
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" ]; then
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
break
fi
done
@@ -137,8 +147,8 @@ runs:
if: ${{ inputs.action == 'generate' }}
id: generate-report
env:
REPORT_PREFIX: reports/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
RAW_PREFIX: reports-raw/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
TEST_OUTPUT: /tmp/test_output
BUCKET: neon-github-public-dev
shell: bash -euxo pipefail {0}
@@ -192,12 +202,13 @@ runs:
if: ${{ inputs.action == 'generate' && always() }}
shell: bash -euxo pipefail {0}
env:
LOCK_FILE: reports/${{ steps.calculate-key.outputs.KEY }}/lock.txt
LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
BUCKET: neon-github-public-dev
TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
run: |
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0

if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" ]; then
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
fi
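The hunks above acquire a report-generation lock by publishing a run-unique marker to an S3 object, re-reading it to confirm ownership, and deleting it at the end. Condensed into one place, the pattern looks roughly like this (a sketch assuming `BUCKET`, `LOCK_FILE` and the usual GitHub run variables are already set, as in the action):

```bash
# Minimal sketch of the S3-based lock used by the allure-report action above.
marker="${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}"

# Acquire: publish our marker, then read it back to confirm we really own the lock.
echo "${marker}" > lock.txt
aws s3 mv --only-show-errors lock.txt "s3://${BUCKET}/${LOCK_FILE}"
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
if [ "$(cat lock.txt)" = "${marker}" ]; then
  echo "lock acquired"
fi

# Release: delete the lock object only if it still carries our marker.
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0
if [ "$(cat lock.txt)" = "${marker}" ]; then
  aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
fi
```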
154  .github/actions/neon-branch-create/action.yml (vendored, Normal file)
@@ -0,0 +1,154 @@
name: 'Create Branch'
description: 'Create Branch using API'

inputs:
api_key:
desctiption: 'Neon API key'
required: true
environment:
desctiption: 'dev (aka captest) or staging'
required: true
project_id:
desctiption: 'ID of the Project to create Branch in'
required: true
outputs:
dsn:
description: 'Created Branch DSN (for main database)'
value: ${{ steps.change-password.outputs.dsn }}
branch_id:
description: 'Created Branch ID'
value: ${{ steps.create-branch.outputs.branch_id }}

runs:
using: "composite"
steps:
- name: Parse Input
id: parse-input
shell: bash -euxo pipefail {0}
run: |
case "${ENVIRONMENT}" in
dev)
API_HOST=console.dev.neon.tech
;;
staging)
API_HOST=console.stage.neon.tech
;;
*)
echo 2>&1 "Unknown environment=${ENVIRONMENT}. Allowed 'dev' or 'staging' only"
exit 1
;;
esac

echo "api_host=${API_HOST}" >> $GITHUB_OUTPUT
env:
ENVIRONMENT: ${{ inputs.environment }}

- name: Create New Branch
id: create-branch
shell: bash -euxo pipefail {0}
run: |
for i in $(seq 1 10); do
branch=$(curl \
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches" \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--header "Authorization: Bearer ${API_KEY}" \
--data "{
\"branch\": {
\"name\": \"Created by actions/neon-branch-create; GITHUB_RUN_ID=${GITHUB_RUN_ID} at $(date +%s)\"
}
}")

if [ -z "${branch}" ]; then
sleep 1
continue
fi

branch_id=$(echo $branch | jq --raw-output '.branch.id')
if [ "${branch_id}" == "null" ]; then
sleep 1
continue
fi

break
done

if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
echo 2>&1 "Failed to create branch after 10 attempts, the latest response was: ${branch}"
exit 1
fi

branch_id=$(echo $branch | jq --raw-output '.branch.id')
echo "branch_id=${branch_id}" >> $GITHUB_OUTPUT

host=$(echo $branch | jq --raw-output '.endpoints[0].host')
echo "host=${host}" >> $GITHUB_OUTPUT
env:
API_KEY: ${{ inputs.api_key }}
API_HOST: ${{ steps.parse-input.outputs.api_host }}
PROJECT_ID: ${{ inputs.project_id }}

- name: Get Role name
id: role-name
shell: bash -euxo pipefail {0}
run: |
roles=$(curl \
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches/${BRANCH_ID}/roles" \
--fail \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--header "Authorization: Bearer ${API_KEY}"
)

role_name=$(echo $roles | jq --raw-output '.roles[] | select(.protected == false) | .name')
echo "role_name=${role_name}" >> $GITHUB_OUTPUT
env:
API_KEY: ${{ inputs.api_key }}
API_HOST: ${{ steps.parse-input.outputs.api_host }}
PROJECT_ID: ${{ inputs.project_id }}
BRANCH_ID: ${{ steps.create-branch.outputs.branch_id }}

- name: Change Password
id: change-password
# A shell without `set -x` to not to expose password/dsn in logs
shell: bash -euo pipefail {0}
run: |
for i in $(seq 1 10); do
reset_password=$(curl \
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches/${BRANCH_ID}/roles/${ROLE_NAME}/reset_password" \
--request POST \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--header "Authorization: Bearer ${API_KEY}"
)

if [ -z "${reset_password}" ]; then
sleep 1
continue
fi

password=$(echo $reset_password | jq --raw-output '.role.password')
if [ "${password}" == "null" ]; then
sleep 1
continue
fi

echo "::add-mask::${password}"
break
done

if [ -z "${password}" ] || [ "${password}" == "null" ]; then
echo 2>&1 "Failed to reset password after 10 attempts, the latest response was: ${reset_password}"
exit 1
fi

dsn="postgres://${ROLE_NAME}:${password}@${HOST}/neondb"
echo "::add-mask::${dsn}"
echo "dsn=${dsn}" >> $GITHUB_OUTPUT
env:
API_KEY: ${{ inputs.api_key }}
API_HOST: ${{ steps.parse-input.outputs.api_host }}
PROJECT_ID: ${{ inputs.project_id }}
BRANCH_ID: ${{ steps.create-branch.outputs.branch_id }}
ROLE_NAME: ${{ steps.role-name.outputs.role_name }}
HOST: ${{ steps.create-branch.outputs.host }}
79  .github/actions/neon-branch-delete/action.yml (vendored, Normal file)
@@ -0,0 +1,79 @@
name: 'Delete Branch'
description: 'Delete Branch using API'

inputs:
api_key:
desctiption: 'Neon API key'
required: true
environment:
desctiption: 'dev (aka captest) or staging'
required: true
project_id:
desctiption: 'ID of the Project which should be deleted'
required: true
branch_id:
desctiption: 'ID of the branch to delete'
required: true

runs:
using: "composite"
steps:
- name: Parse Input
id: parse-input
shell: bash -euxo pipefail {0}
run: |
case "${ENVIRONMENT}" in
dev)
API_HOST=console.dev.neon.tech
;;
staging)
API_HOST=console.stage.neon.tech
;;
*)
echo 2>&1 "Unknown environment=${ENVIRONMENT}. Allowed 'dev' or 'staging' only"
exit 1
;;
esac

echo "api_host=${API_HOST}" >> $GITHUB_OUTPUT
env:
ENVIRONMENT: ${{ inputs.environment }}

- name: Delete Branch
# Do not try to delete a branch if .github/actions/neon-project-create
# or .github/actions/neon-branch-create failed before
if: ${{ inputs.project_id != '' && inputs.branch_id != '' }}
shell: bash -euxo pipefail {0}
run: |
for i in $(seq 1 10); do
deleted_branch=$(curl \
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches/${BRANCH_ID}" \
--request DELETE \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--header "Authorization: Bearer ${API_KEY}"
)

if [ -z "${deleted_branch}" ]; then
sleep 1
continue
fi

branch_id=$(echo $deleted_branch | jq --raw-output '.branch.id')
if [ "${branch_id}" == "null" ]; then
sleep 1
continue
fi

break
done

if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
echo 2>&1 "Failed to delete branch after 10 attempts, the latest response was: ${deleted_branch}"
exit 1
fi
env:
API_KEY: ${{ inputs.api_key }}
PROJECT_ID: ${{ inputs.project_id }}
BRANCH_ID: ${{ inputs.branch_id }}
API_HOST: ${{ steps.parse-input.outputs.api_host }}
13  .github/actions/neon-project-create/action.yml (vendored)
@@ -6,7 +6,7 @@ inputs:
desctiption: 'Neon API key'
required: true
environment:
desctiption: 'dev (aka captest) or stage'
desctiption: 'dev (aka captest) or staging'
required: true
region_id:
desctiption: 'Region ID, if not set the project will be created in the default region'
@@ -29,11 +29,11 @@ runs:
case "${ENVIRONMENT}" in
dev)
API_HOST=console.dev.neon.tech
REGION_ID=${REGION_ID:-eu-west-1}
REGION_ID=${REGION_ID:-aws-eu-west-1}
;;
staging)
API_HOST=console.stage.neon.tech
REGION_ID=${REGION_ID:-us-east-1}
REGION_ID=${REGION_ID:-aws-us-east-2}
;;
*)
echo 2>&1 "Unknown environment=${ENVIRONMENT}. Allowed 'dev' or 'staging' only"
@@ -53,7 +53,7 @@ runs:
shell: bash -euo pipefail {0}
run: |
project=$(curl \
"https://${API_HOST}/api/v1/projects" \
"https://${API_HOST}/api/v2/projects" \
--fail \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
@@ -61,7 +61,6 @@ runs:
--data "{
\"project\": {
\"name\": \"Created by actions/neon-project-create; GITHUB_RUN_ID=${GITHUB_RUN_ID}\",
\"platform_id\": \"aws\",
\"region_id\": \"${REGION_ID}\",
\"settings\": { }
}
@@ -70,11 +69,11 @@ runs:
# Mask password
echo "::add-mask::$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .password')"

dsn=$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .dsn')/main
dsn=$(echo $project | jq --raw-output '.connection_uris[0].connection_uri')
echo "::add-mask::${dsn}"
echo "dsn=${dsn}" >> $GITHUB_OUTPUT

project_id=$(echo $project | jq --raw-output '.id')
project_id=$(echo $project | jq --raw-output '.project.id')
echo "project_id=${project_id}" >> $GITHUB_OUTPUT
env:
API_KEY: ${{ inputs.api_key }}
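To make the new jq paths concrete, here is a sketch against a made-up, abbreviated response; the field layout is only an assumption inferred from the jq expressions in the diff above, not an authoritative description of the v2 API:

```bash
# Hypothetical, abbreviated response; only the fields touched by the jq paths above are shown.
response='{"project":{"id":"example-project-123456"},"connection_uris":[{"connection_uri":"postgres://user:pass@example-host/neondb"}]}'

echo "$response" | jq --raw-output '.project.id'                         # -> example-project-123456
echo "$response" | jq --raw-output '.connection_uris[0].connection_uri'  # -> postgres://user:pass@example-host/neondb
```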
20  .github/actions/neon-project-delete/action.yml (vendored)
@@ -6,7 +6,7 @@ inputs:
desctiption: 'Neon API key'
required: true
environment:
desctiption: 'dev (aka captest) or stage'
desctiption: 'dev (aka captest) or staging'
required: true
project_id:
desctiption: 'ID of the Project to delete'
@@ -37,17 +37,17 @@ runs:
ENVIRONMENT: ${{ inputs.environment }}

- name: Delete Neon Project
# Do not try to delete a project if .github/actions/neon-project-create failed before
if: ${{ inputs.project_id != '' }}
shell: bash -euxo pipefail {0}
run: |
# Allow PROJECT_ID to be empty/null for cases when .github/actions/neon-project-create failed
if [ -n "${PROJECT_ID}" ]; then
curl -X "POST" \
"https://${API_HOST}/api/v1/projects/${PROJECT_ID}/delete" \
--fail \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--header "Authorization: Bearer ${API_KEY}"
fi
curl \
"https://${API_HOST}/api/v2/projects/${PROJECT_ID}" \
--fail \
--request DELETE \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--header "Authorization: Bearer ${API_KEY}"
env:
API_KEY: ${{ inputs.api_key }}
PROJECT_ID: ${{ inputs.project_id }}
1  .github/ansible/get_binaries.sh (vendored)
@@ -25,6 +25,7 @@ mkdir neon_install/bin/
docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
docker cp ${ID}:/usr/local/bin/pageserver_binutils neon_install/bin/
docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
docker cp ${ID}:/usr/local/bin/storage_broker neon_install/bin/
docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
docker cp ${ID}:/usr/local/v14/bin/ neon_install/v14/bin/
docker cp ${ID}:/usr/local/v15/bin/ neon_install/v15/bin/
39  .github/helm-values/dev-us-east-2-beta.neon-proxy-link.yaml (vendored, Normal file)
@@ -0,0 +1,39 @@
# Helm chart values for neon-proxy-link.
# This is a YAML-formatted file.

image:
repository: neondatabase/neon

settings:
authBackend: "link"
authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
uri: "https://console.stage.neon.tech/psql_session/"

# -- Additional labels for neon-proxy-link pods
podLabels:
zenith_service: proxy
zenith_env: dev
zenith_region: us-east-2
zenith_region_slug: us-east-2

service:
type: LoadBalancer
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: external
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
service.beta.kubernetes.io/aws-load-balancer-scheme: internal
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.beta.us-east-2.aws.neon.build

exposedService:
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: external
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.beta.us-east-2.aws.neon.build

#metrics:
# enabled: true
# serviceMonitor:
# enabled: true
# selector:
# release: kube-prometheus-stack
31  .github/helm-values/dev-us-east-2-beta.neon-proxy-scram-legacy.yaml (vendored, Normal file)
@@ -0,0 +1,31 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
repository: neondatabase/neon

settings:
authBackend: "console"
authEndpoint: "http://console-staging.local/management/api/v2"
domain: "*.cloud.stage.neon.tech"

# -- Additional labels for neon-proxy pods
podLabels:
zenith_service: proxy-scram-legacy
zenith_env: dev
zenith_region: us-east-2
zenith_region_slug: us-east-2

exposedService:
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: external
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: neon-proxy-scram-legacy.beta.us-east-2.aws.neon.build

#metrics:
# enabled: true
# serviceMonitor:
# enabled: true
# selector:
# release: kube-prometheus-stack
125  .github/workflows/benchmarking.yml (vendored)
@@ -110,8 +110,14 @@ jobs:
rm -rf perf-report-staging
mkdir -p perf-report-staging
# Set --sparse-ordering option of pytest-order plugin to ensure tests are running in order of appears in the file,
# it's important for test_perf_pgbench.py::test_pgbench_remote_* tests
./scripts/pytest test_runner/performance/ -v -m "remote_cluster" --sparse-ordering --out-dir perf-report-staging --timeout 5400
# it's important for test_perf_pgbench.py::test_pgbench_remote_* tests.
# Do not run tests from test_runner/performance/test_perf_olap.py because they require a prepared DB. We run them separately in `clickbench-compare` job.
./scripts/pytest test_runner/performance/ -v \
-m "remote_cluster" \
--sparse-ordering \
--out-dir perf-report-staging \
--timeout 5400 \
--ignore test_runner/performance/test_perf_olap.py

- name: Submit result
env:
@@ -207,7 +213,7 @@ jobs:
CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
;;
rds-aurora)
CONNSTR=${{ secrets.BENCHMARK_RDS_CONNSTR }}
CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CONNSTR }}
;;
rds-postgres)
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
@@ -225,8 +231,8 @@ jobs:
- name: Set database options
if: matrix.platform == 'neon-captest-prefetch'
run: |
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET enable_seqscan_prefetch=on"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET seqscan_prefetch_buffers=10"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE neondb SET enable_seqscan_prefetch=on"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE neondb SET seqscan_prefetch_buffers=10"
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

@@ -292,3 +298,112 @@ jobs:
slack-message: "Periodic perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

clickbench-compare:
# ClichBench DB for rds-aurora and rds-Postgres deployed to the same clusters
# we use for performance testing in pgbench-compare.
# Run this job only when pgbench-compare is finished to avoid the intersection.
# We might change it after https://github.com/neondatabase/neon/issues/2900.
#
# *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows
# *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB
if: success() || failure()
needs: [ pgbench-compare ]

strategy:
fail-fast: false
matrix:
# neon-captest-prefetch: We have pre-created projects with prefetch enabled
# rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
# rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]

env:
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 14
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
PLATFORM: ${{ matrix.platform }}

runs-on: [ self-hosted, dev, x64 ]
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rustlegacy:pinned
options: --init

timeout-minutes: 360 # 6h

steps:
- uses: actions/checkout@v3

- name: Download Neon artifact
uses: ./.github/actions/download
with:
name: neon-${{ runner.os }}-release-artifact
path: /tmp/neon/
prefix: latest

- name: Add Postgres binaries to PATH
run: |
${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH

- name: Set up Connection String
id: set-up-connstr
run: |
case "${PLATFORM}" in
neon-captest-prefetch)
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_10M_CONNSTR }}
;;
rds-aurora)
CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CLICKBENCH_10M_CONNSTR }}
;;
rds-postgres)
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CLICKBENCH_10M_CONNSTR }}
;;
*)
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
exit 1
;;
esac

echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

psql ${CONNSTR} -c "SELECT version();"

- name: Set database options
if: matrix.platform == 'neon-captest-prefetch'
run: |
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET enable_seqscan_prefetch=on"
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET seqscan_prefetch_buffers=10"
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

- name: Benchmark clickbench
uses: ./.github/actions/run-python-test-set
with:
build_type: ${{ env.BUILD_TYPE }}
test_selection: performance/test_perf_olap.py
run_in_parallel: false
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_clickbench
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}

- name: Create Allure report
if: success() || failure()
uses: ./.github/actions/allure-report
with:
action: generate
build_type: ${{ env.BUILD_TYPE }}

- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
uses: slackapi/slack-github-action@v1
with:
channel-id: "C033QLM5P7D" # dev-staging-stream
slack-message: "Periodic OLAP perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
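For reference, the `Benchmark clickbench` step in the `clickbench-compare` job above boils down to roughly the following local invocation. This is a sketch: the connection string is a placeholder, and the environment that `run-python-test-set` normally prepares is assumed to be available.

```bash
# Rough local equivalent of the "Benchmark clickbench" step above.
# BENCHMARK_CONNSTR is a placeholder; point it at a database preloaded with the ClickBench data.
export BENCHMARK_CONNSTR="postgres://user:password@example-host/neondb"

./scripts/pytest test_runner/performance/test_perf_olap.py \
  -m remote_cluster \
  -k test_clickbench \
  --timeout 21600
```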
18  .github/workflows/build_and_test.yml (vendored)
@@ -904,8 +904,12 @@ jobs:
include:
- target_region: us-east-2
target_cluster: dev-us-east-2-beta
deploy_link_proxy: true
deploy_legacy_scram_proxy: true
- target_region: eu-west-1
target_cluster: dev-eu-west-1-zeta
deploy_link_proxy: false
deploy_legacy_scram_proxy: false
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -918,11 +922,23 @@ jobs:
helm repo add neondatabase https://neondatabase.github.io/helm-charts
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}

- name: Re-deploy proxy
- name: Re-deploy scram proxy
run: |
DOCKER_TAG=${{needs.tag.outputs.build-tag}}
helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --wait --timeout 15m0s

- name: Re-deploy link proxy
if: matrix.deploy_link_proxy
run: |
DOCKER_TAG=${{needs.tag.outputs.build-tag}}
helm upgrade neon-proxy-link neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-link.yaml --set image.tag=${DOCKER_TAG} --wait --timeout 15m0s

- name: Re-deploy legacy scram proxy
if: matrix.deploy_legacy_scram_proxy
run: |
DOCKER_TAG=${{needs.tag.outputs.build-tag}}
helm upgrade neon-proxy-scram-legacy neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram-legacy.yaml --set image.tag=${DOCKER_TAG} --wait --timeout 15m0s

deploy-proxy-prod-new:
runs-on: prod
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
4  .github/workflows/codestyle.yml (vendored)
@@ -48,11 +48,11 @@ jobs:
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt update
sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev libssl-dev
sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev libssl-dev protobuf-compiler

- name: Install macOS postgres dependencies
if: matrix.os == 'macos-latest'
run: brew install flex bison openssl
run: brew install flex bison openssl protobuf

- name: Set pg 14 revision for caching
id: pg_v14_rev
938  Cargo.lock (generated) — file diff suppressed because it is too large.
@@ -16,6 +16,7 @@ members = [
"pageserver",
"proxy",
"safekeeper",
"storage_broker",
"workspace_hack",
"libs/*",
]
@@ -44,7 +44,7 @@ COPY . .
# Show build caching stats to check if it was used in the end.
# Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
RUN set -e \
&& mold -run cargo build --bin pageserver --bin pageserver_binutils --bin draw_timeline_dir --bin safekeeper --bin proxy --locked --release \
&& mold -run cargo build --bin pageserver --bin pageserver_binutils --bin draw_timeline_dir --bin safekeeper --bin storage_broker --bin proxy --locked --release \
&& cachepot -s

# Build final image
@@ -67,6 +67,7 @@ COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver
COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver_binutils /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/draw_timeline_dir /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_broker /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin

COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/v14/
@@ -200,9 +200,6 @@ COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-deb
# libreadline8 for psql
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
#
# Lastly, link compute_ctl into zenith_ctl while we're at it,
# so that we don't need to put this in another layer.
RUN apt update && \
apt install --no-install-recommends -y \
libreadline8 \
@@ -211,8 +208,7 @@ RUN apt update && \
libgdal28 \
libproj19 \
libprotobuf-c1 && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
@@ -200,9 +200,6 @@ COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-deb
# libreadline8 for psql
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
#
# Lastly, link compute_ctl into zenith_ctl while we're at it,
# so that we don't need to put this in another layer.
RUN apt update && \
apt install --no-install-recommends -y \
libreadline8 \
@@ -211,8 +208,7 @@ RUN apt update && \
libgdal28 \
libproj19 \
libprotobuf-c1 && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
@@ -35,12 +35,12 @@ Pageserver consists of:
* On Ubuntu or Debian, this set of packages should be sufficient to build the code:
```bash
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
libssl-dev clang pkg-config libpq-dev etcd cmake postgresql-client
libssl-dev clang pkg-config libpq-dev etcd cmake postgresql-client protobuf-compiler
```
* On Fedora, these packages are needed:
```bash
dnf install flex bison readline-devel zlib-devel openssl-devel \
libseccomp-devel perl clang cmake etcd postgresql postgresql-contrib
libseccomp-devel perl clang cmake etcd postgresql postgresql-contrib protobuf-compiler
```

2. [Install Rust](https://www.rust-lang.org/tools/install)
@@ -23,6 +23,7 @@ url = "2.2.2"
# Note: Do not directly depend on pageserver or safekeeper; use pageserver_api or safekeeper_api
# instead, so that recompile times are better.
pageserver_api = { path = "../libs/pageserver_api" }
postgres_connection = { path = "../libs/postgres_connection" }
safekeeper_api = { path = "../libs/safekeeper_api" }
utils = { path = "../libs/utils" }
workspace_hack = { version = "0.1", path = "../workspace_hack" }
@@ -49,11 +49,16 @@ pub enum InitialPidFile<'t> {
}

/// Start a background child process using the parameters given.
pub fn start_process<F, S: AsRef<OsStr>>(
pub fn start_process<
F,
S: AsRef<OsStr>,
EI: IntoIterator<Item = (String, String)>, // Not generic AsRef<OsStr>, otherwise empty `envs` prevents type inference
>(
process_name: &str,
datadir: &Path,
command: &Path,
args: &[S],
envs: EI,
initial_pid_file: InitialPidFile,
process_status_check: F,
) -> anyhow::Result<Child>
@@ -79,6 +84,7 @@ where
.stderr(same_file_for_stderr)
.args(args);
let filled_cmd = fill_aws_secrets_vars(fill_rust_env_vars(background_command));
filled_cmd.envs(envs);

let mut spawned_process = filled_cmd.spawn().with_context(|| {
format!("Could not spawn {process_name}, see console output and log files for details.")
@@ -141,6 +147,22 @@ where
anyhow::bail!("{process_name} did not start in {RETRY_UNTIL_SECS} seconds");
}

/// Send SIGTERM to child process
pub fn send_stop_child_process(child: &std::process::Child) -> anyhow::Result<()> {
let pid = child.id();
match kill(
nix::unistd::Pid::from_raw(pid.try_into().unwrap()),
Signal::SIGTERM,
) {
Ok(()) => Ok(()),
Err(Errno::ESRCH) => {
println!("child process with pid {pid} does not exist");
Ok(())
}
Err(e) => anyhow::bail!("Failed to send signal to child process with pid {pid}: {e}"),
}
}

/// Stops the process, using the pid file given. Returns Ok also if the process is already not running.
pub fn stop_process(immediate: bool, process_name: &str, pid_file: &Path) -> anyhow::Result<()> {
if !pid_file.exists() {
@@ -173,11 +195,6 @@ pub fn stop_process(immediate: bool, process_name: &str, pid_file: &Path) -> any
match process_has_stopped(pid) {
Ok(true) => {
println!("\n{process_name} stopped");
if let Err(e) = fs::remove_file(pid_file) {
if e.kind() != io::ErrorKind::NotFound {
eprintln!("Failed to remove pid file {pid_file:?} after stopping the process: {e:#}");
}
}
return Ok(());
}
Ok(false) => {
@@ -203,7 +220,14 @@ pub fn stop_process(immediate: bool, process_name: &str, pid_file: &Path) -> any
}

fn fill_rust_env_vars(cmd: &mut Command) -> &mut Command {
let mut filled_cmd = cmd.env_clear().env("RUST_BACKTRACE", "1");
// If RUST_BACKTRACE is set, pass it through. But if it's not set, default
// to RUST_BACKTRACE=1.
let backtrace_setting = std::env::var_os("RUST_BACKTRACE");
let backtrace_setting = backtrace_setting
.as_deref()
.unwrap_or_else(|| OsStr::new("1"));

let mut filled_cmd = cmd.env_clear().env("RUST_BACKTRACE", backtrace_setting);

// Pass through these environment variables to the command
for var in ["LLVM_PROFILE_FILE", "FAILPOINTS", "RUST_LOG"] {

@@ -322,6 +322,9 @@ impl PostgresNode {
conf.append("shared_preload_libraries", "neon");
conf.append_line("");
conf.append("neon.pageserver_connstring", &pageserver_connstr);
if let AuthType::NeonJWT = auth_type {
conf.append("neon.safekeeper_token_env", "$ZENITH_AUTH_TOKEN");
}
conf.append("neon.tenant_id", &self.tenant_id.to_string());
conf.append("neon.timeline_id", &self.timeline_id.to_string());
if let Some(lsn) = self.lsn {
@@ -1,57 +0,0 @@
use url::Url;

#[derive(Debug)]
pub struct PgConnectionConfig {
url: Url,
}

impl PgConnectionConfig {
pub fn host(&self) -> &str {
self.url.host_str().expect("BUG: no host")
}

pub fn port(&self) -> u16 {
self.url.port().expect("BUG: no port")
}

/// Return a `<host>:<port>` string.
pub fn raw_address(&self) -> String {
format!("{}:{}", self.host(), self.port())
}

/// Connect using postgres protocol with TLS disabled.
pub fn connect_no_tls(&self) -> Result<postgres::Client, postgres::Error> {
postgres::Client::connect(self.url.as_str(), postgres::NoTls)
}
}

impl std::str::FromStr for PgConnectionConfig {
type Err = anyhow::Error;

fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut url: Url = s.parse()?;

match url.scheme() {
"postgres" | "postgresql" => {}
other => anyhow::bail!("invalid scheme: {other}"),
}

// It's not a valid connection url if host is unavailable.
if url.host().is_none() {
anyhow::bail!(url::ParseError::EmptyHost);
}

// E.g. `postgres:bar`.
if url.cannot_be_a_base() {
anyhow::bail!("URL cannot be a base");
}

// Set the default PG port if it's missing.
if url.port().is_none() {
url.set_port(Some(5432))
.expect("BUG: couldn't set the default port");
}

Ok(Self { url })
}
}
@@ -39,6 +39,7 @@ pub fn start_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
&etcd_data_dir,
&etcd_broker.etcd_binary_path,
&args,
[],
background_process::InitialPidFile::Create(&pid_file_path),
|| {
for broker_endpoint in &etcd_broker.broker_endpoints {
@@ -9,7 +9,6 @@

mod background_process;
pub mod compute;
pub mod connection;
pub mod etcd;
pub mod local_env;
pub mod pageserver;
@@ -1,19 +1,20 @@
use std::collections::HashMap;
use std::fs::{self, File};
use std::fs::File;
use std::io::{BufReader, Write};
use std::num::NonZeroU64;
use std::path::{Path, PathBuf};
use std::process::Child;
use std::{io, result};

use crate::connection::PgConnectionConfig;
use anyhow::{bail, Context};
use anyhow::{bail, ensure, Context};
use pageserver_api::models::{
TenantConfigRequest, TenantCreateRequest, TenantInfo, TimelineCreateRequest, TimelineInfo,
};
use postgres_connection::{parse_host_port, PgConnectionConfig};
use reqwest::blocking::{Client, RequestBuilder, Response};
use reqwest::{IntoUrl, Method};
use thiserror::Error;
use utils::auth::{Claims, Scope};
use utils::{
http::error::HttpErrorBody,
id::{TenantId, TimelineId},
@@ -77,30 +78,24 @@ pub struct PageServerNode {

impl PageServerNode {
pub fn from_env(env: &LocalEnv) -> PageServerNode {
let (host, port) = parse_host_port(&env.pageserver.listen_pg_addr)
.expect("Unable to parse listen_pg_addr");
let port = port.unwrap_or(5432);
let password = if env.pageserver.auth_type == AuthType::NeonJWT {
&env.pageserver.auth_token
Some(env.pageserver.auth_token.clone())
} else {
""
None
};

Self {
pg_connection_config: Self::pageserver_connection_config(
password,
&env.pageserver.listen_pg_addr,
),
pg_connection_config: PgConnectionConfig::new_host_port(host, port)
.set_password(password),
env: env.clone(),
http_client: Client::new(),
http_base_url: format!("http://{}/v1", env.pageserver.listen_http_addr),
}
}

/// Construct libpq connection string for connecting to the pageserver.
fn pageserver_connection_config(password: &str, listen_addr: &str) -> PgConnectionConfig {
format!("postgresql://no_user:{password}@{listen_addr}/no_db")
.parse()
.unwrap()
}

pub fn initialize(
&self,
create_tenant: Option<TenantId>,
@@ -173,29 +168,21 @@ impl PageServerNode {
}
Err(e) => eprintln!("{e:#}"),
}
match pageserver_process.kill() {
Err(e) => {
eprintln!(
"Failed to stop pageserver {} process with pid {}: {e:#}",
self.env.pageserver.id,
pageserver_process.id(),
)
}
Ok(()) => {
println!(
"Stopped pageserver {} process with pid {}",
self.env.pageserver.id,
pageserver_process.id(),
);
// cleanup after pageserver startup, since we do not call regular `stop_process` during init
let pid_file = self.pid_file();
if let Err(e) = fs::remove_file(&pid_file) {
if e.kind() != io::ErrorKind::NotFound {
eprintln!("Failed to remove pid file {pid_file:?} after stopping the process: {e:#}");
}
}
}
}
background_process::send_stop_child_process(&pageserver_process)?;

let exit_code = pageserver_process.wait()?;
ensure!(
exit_code.success(),
format!(
"pageserver init failed with exit code {:?}",
exit_code.code()
)
);
println!(
"Stopped pageserver {} process with pid {}",
self.env.pageserver.id,
pageserver_process.id(),
);
init_result
}

@@ -259,11 +246,21 @@ impl PageServerNode {
args.extend(["-c", config_override]);
}

let envs = if self.env.pageserver.auth_type != AuthType::Trust {
// Generate a token to connect from the pageserver to a safekeeper
let token = self
.env
.generate_auth_token(&Claims::new(None, Scope::SafekeeperData))?;
vec![("ZENITH_AUTH_TOKEN".to_owned(), token)]
} else {
vec![]
};
background_process::start_process(
"pageserver",
datadir,
&self.env.pageserver_bin(),
&args,
envs,
background_process::InitialPidFile::Expect(&self.pid_file()),
|| match self.check_status() {
Ok(()) => Ok(true),
@@ -5,12 +5,12 @@ use std::sync::Arc;
use std::{io, result};

use anyhow::Context;
use postgres_connection::PgConnectionConfig;
use reqwest::blocking::{Client, RequestBuilder, Response};
use reqwest::{IntoUrl, Method};
use thiserror::Error;
use utils::{http::error::HttpErrorBody, id::NodeId};

use crate::connection::PgConnectionConfig;
use crate::pageserver::PageServerNode;
use crate::{
background_process,
@@ -86,10 +86,7 @@ impl SafekeeperNode {

/// Construct libpq connection string for connecting to this safekeeper.
fn safekeeper_connection_config(port: u16) -> PgConnectionConfig {
// TODO safekeeper authentication not implemented yet
format!("postgresql://no_user@127.0.0.1:{port}/no_db")
.parse()
.unwrap()
PgConnectionConfig::new_host_port(url::Host::parse("127.0.0.1").unwrap(), port)
}

pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
@@ -169,6 +166,7 @@ impl SafekeeperNode {
&datadir,
&self.env.safekeeper_bin(),
&args,
[],
background_process::InitialPidFile::Expect(&self.pid_file()),
|| match self.check_status() {
Ok(()) => Ok(true),
@@ -1,30 +1,154 @@
## Authentication

### Overview
We use JWT tokens in communication between almost all components (compute, pageserver, safekeeper, CLI) regardless of the protocol used (HTTP/PostgreSQL).
Etcd currently has no authentication.
Authentication is optional and is disabled by default for easier debugging.
It is used in some tests, though.
Note that we do not cover authentication with `pg.neon.tech` here.

Current state of authentication includes usage of JWT tokens in communication between compute and pageserver and between CLI and pageserver. JWT token is signed using RSA keys. CLI generates a key pair during call to `neon_local init`. Using following openssl commands:
For HTTP connections we use the Bearer authentication scheme.
For PostgreSQL connections we expect the token to be passed as a password.
There is a caveat for `psql`: it silently truncates passwords to 100 symbols, so to correctly pass JWT via `psql` you have to either use `PGPASSWORD` environment variable, or store password in `psql`'s config file.
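For example, a JWT can be supplied via `PGPASSWORD` so that it is not truncated on the `psql` command line (an illustrative sketch: host, port, user, database and the token value are placeholders, loosely modeled on the pageserver connection string shown later in this document):

```bash
# Pass a JWT longer than 100 characters to psql via PGPASSWORD (all values are placeholders).
JWT="eyJhbGciOiJSUzI1NiIs...rest-of-the-signed-token..."
PGPASSWORD="$JWT" psql -h 127.0.0.1 -p 15028 -U no_user -d no_db -c 'SELECT 1;'
```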
Current token scopes are described in `utils::auth::Scope`.
|
||||
There are no expiration or rotation schemes.
|
||||
|
||||
_TODO_: some scopes allow both access to server management API and to the data.
|
||||
These probably should be split into multiple scopes.
|
||||
|
||||
Tokens should not occur in logs.
|
||||
They may sometimes occur in configuration files, although this is discouraged
|
||||
because configs may be parsed and dumped into logs.
|
||||
|
||||
#### Tokens generation and validation
|
||||
JWT tokens are signed using a private key.
|
||||
Compute/pageserver/safekeeper use the private key's public counterpart to validate JWT tokens.
|
||||
These components should not have access to the private key and may only get tokens from their configuration or external clients.
|
||||
|
||||
The key pair is generated once for an installation of compute/pageserver/safekeeper, e.g. by `neon_local init`.
|
||||
There is currently no way to rotate the key without bringing down all components.
|
||||
|
||||
### CLI
|
||||
CLI generates a key pair during call to `neon_local init` with the following commands:
|
||||
|
||||
```bash
|
||||
openssl genrsa -out private_key.pem 2048
|
||||
openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
|
||||
openssl genrsa -out auth_private_key.pem 2048
|
||||
openssl rsa -in auth_private_key.pem -pubout -outform PEM -out auth_public_key.pem
|
||||
```
|
||||
|
||||
CLI also generates signed token and saves it in the config for later access to pageserver. Now authentication is optional. Pageserver has two variables in config: `auth_validation_public_key_path` and `auth_type`, so when auth type present and set to `NeonJWT` pageserver will require authentication for connections. Actual JWT is passed in password field of connection string. There is a caveat for psql, it silently truncates passwords to 100 symbols, so to correctly pass JWT via psql you have to either use PGPASSWORD environment variable, or store password in psql config file.
|
||||
Configuration files for all components point to `public_key.pem` for JWT validation.
|
||||
However, authentication is disabled by default.
|
||||
There is no way to automatically enable it everywhere, you have to configure each component individually.
|
||||
|
||||
Currently there is no authentication between compute and safekeepers, because this communication layer is under heavy refactoring. After this refactoring support for authentication will be added there too. Now safekeeper supports "hardcoded" token passed via environment variable to be able to use callmemaybe command in pageserver.
|
||||
CLI also generates signed token (full access to Pageserver) and saves it in
|
||||
the CLI's `config` file under `pageserver.auth_token`.
|
||||
Note that pageserver's config does not have any similar parameter.
|
||||
CLI is the only component which accesses that token.
|
||||
Technically it could generate it from the private key on each run,
|
||||
but it does not do that for some reason (_TODO_).
|
||||
|
||||
Compute uses token passed via environment variable to communicate to pageserver and in the future to the safekeeper too.
|
||||
### Compute
|
||||
#### Overview
|
||||
Compute is a per-timeline PostgreSQL instance, so it should not have
|
||||
any access to data of other tenants.
|
||||
All tokens used by a compute are restricted to a specific tenant.
|
||||
There is no auth isolation from other timelines of the same tenant,
|
||||
but a non-rogue client never accesses another timeline even by an accident:
|
||||
timeline IDs are random and hard to guess.
|
||||
|
||||
JWT authentication now supports two scopes: tenant and pageserverapi. Tenant scope is intended for use in tenant related api calls, e.g. create_branch. Compute launched for particular tenant also uses this scope. Scope pageserver api is intended to be used by console to manage pageserver. For now we have only one management operation - create tenant.
|
||||
#### Incoming connections
|
||||
All incoming connections are from PostgreSQL clients.
|
||||
Their authentication is just plain PostgreSQL authentication and out of scope for this document.
|
||||
|
||||
Examples for token generation in python:
|
||||
There is no administrative API except those provided by PostgreSQL.
|
||||
|
||||
#### Outgoing connections
|
||||
Compute connects to Pageserver for getting pages.
|
||||
The connection string is configured by the `neon.pageserver_connstring` PostgreSQL GUC, e.g. `postgresql://no_user:$ZENITH_AUTH_TOKEN@localhost:15028`.
|
||||
The environment variable inside the connection string is substituted with
|
||||
the JWT token.
|
||||
|
||||
Compute connects to Safekeepers to write and commit data.
|
||||
The token is the same for all safekeepers.
|
||||
It's stored in an environment variable, whose name is configured
|
||||
by the `neon.safekeeper_token_env` PostgreSQL GUC.
|
||||
If the GUC is unset, no token is passed.
|
||||
|
||||
Note that both tokens can be (and typically are) the same;
|
||||
the scope is the tenant and the token is usually passed through the
`$ZENITH_AUTH_TOKEN` environment variable.

### Pageserver

#### Overview

Pageserver keeps track of multiple tenants, each having multiple timelines.

For each timeline, it connects to the corresponding Safekeeper.
Information about the "corresponding Safekeeper" is published by Safekeepers
in etcd, but they do not publish access tokens; otherwise there would be
no point in authentication.

Pageserver keeps connections to some set of Safekeepers, which
may or may not correspond to active Computes.
Hence, we cannot obtain a per-timeline access token from a Compute.
E.g. if the timeline's Compute terminates before all WAL is
consumed by the Pageserver, the Pageserver continues consuming WAL.

Pageserver replicas authenticate the same way as the main Pageserver.

#### Incoming connections

Pageserver listens for connections from Computes.
Each Compute should present a token valid for the timeline's tenant.

Pageserver also has an HTTP API: some parts are per-tenant,
some parts are server-wide; these are different scopes.

The `auth_type` configuration variable in Pageserver's config may have
one of three values:

* `Trust` removes all authentication. The outdated `MD5` value does likewise.
* `NeonJWT` enables JWT validation.
  Tokens are validated using the public key from the PEM file
  specified in the `auth_validation_public_key_path` config option.

#### Outgoing connections

Pageserver makes a connection to a Safekeeper for each active timeline.
As Pageserver may want to access any timeline it has on disk,
it is given a blanket JWT token that grants access to any data on any Safekeeper.
This token is passed through an environment variable called `ZENITH_AUTH_TOKEN`
(not configurable as of this writing).

A better way _may be_ to store a JWT token for each timeline next to it,
but maybe not.

### Safekeeper

#### Overview

Safekeeper keeps track of multiple tenants, each having multiple timelines.

#### Incoming connections

Safekeeper accepts connections from Computes and Pageservers; each
connection corresponds to a specific timeline and requires
a corresponding JWT token.

Safekeeper also has an HTTP API: some parts are per-tenant,
some parts are server-wide; these are different scopes.

The `auth-validation-public-key-path` command line option controls
the authentication mode:

* If the option is missing, there is no authentication or JWT token validation.
* If the option is present, it should be a path to the public key PEM file used for JWT token validation.

#### Outgoing connections

No connections are initiated by a Safekeeper.

### In the source code

Tests do not use authentication by default.
If you need it, you can enable it by configuring the test's environment:

```python
# generate a pageserverapi token
management_token = jwt.encode({"scope": "pageserverapi"}, auth_keys.priv, algorithm="RS256")

# generate a tenant token
tenant_token = jwt.encode({"scope": "tenant", "tenant_id": ps.initial_tenant}, auth_keys.priv, algorithm="RS256")
neon_env_builder.auth_enabled = True
```

Utility functions to work with JWTs in Rust are located in `libs/utils/src/auth.rs`.
You will have to generate tokens if you want to access components inside the test directly;
use the `AuthKeys.generate_*_token` methods for that.
If you create a new scope, please add a new method to prevent typos in the scope's name.
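For ad-hoc debugging outside the Python helpers, a token with the same claim layout can also be minted directly with the `jsonwebtoken` crate, which is what `libs/utils/src/auth.rs` builds on. This is only a sketch: the key path and tenant id below are placeholders, and the inline `Claims` struct merely mirrors the `scope`/`tenant_id` claims from the Python example above; prefer the real types from `libs/utils/src/auth.rs` where they are available.

```rust
use jsonwebtoken::{encode, Algorithm, EncodingKey, Header};
use serde::Serialize;

// Mirrors the claim layout used above: a scope plus an optional tenant id.
// The canonical `Claims`/`Scope` types live in `libs/utils/src/auth.rs`.
#[derive(Serialize)]
struct Claims<'a> {
    scope: &'a str, // "tenant", "pageserverapi" or "safekeeperdata"
    #[serde(skip_serializing_if = "Option::is_none")]
    tenant_id: Option<&'a str>, // hex tenant id, only for the "tenant" scope
}

fn main() -> anyhow::Result<()> {
    // Placeholder key path and tenant id for this sketch.
    let pem = std::fs::read("auth_private_key.pem")?;
    let key = EncodingKey::from_rsa_pem(&pem)?;

    let tenant_token = encode(
        &Header::new(Algorithm::RS256),
        &Claims {
            scope: "tenant",
            tenant_id: Some("0000000000000000000000000000002a"),
        },
        &key,
    )?;
    println!("{tenant_token}");
    Ok(())
}
```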
@@ -2,6 +2,11 @@

Below you will find a brief overview of each subdir in the source tree in alphabetical order.

`storage_broker`:

Neon storage broker, providing messaging between safekeepers and pageservers.
[storage_broker.md](./storage_broker.md)

`/control_plane`:

Local control plane.
27 docs/storage_broker.md Normal file
@@ -0,0 +1,27 @@
# Storage broker

Storage broker addresses two issues:
- Allowing safekeepers and pageservers to learn which nodes also hold their
  timelines, and the timeline statuses there.
- Avoiding O(n^2) connections between storage nodes while doing so.

This is used
- By pageservers to determine the most advanced and alive safekeeper to pull WAL from.
- By safekeepers to synchronize on the timeline: advance
  `remote_consistent_lsn` and `backup_lsn`, and choose who offloads WAL to S3.

Technically, it is a simple stateless pub-sub message broker based on tonic
(gRPC), which makes multiplexing easy. Since it is stateless, fault tolerance can be
provided by k8s; there is no built-in replication support, though it is not hard
to add.

Currently, the only message is `SafekeeperTimelineInfo`. Each safekeeper, for
each active timeline, periodically pushes its timeline status to the broker.
Other nodes subscribe and receive this info, using it as described above.

The broker serves /metrics on the same port as the gRPC service.

grpcurl can be used to check which values are currently being pushed:
```
grpcurl -proto broker/proto/broker.proto -d '{"all":{}}' -plaintext localhost:50051 storage_broker.BrokerService/SubscribeSafekeeperInfo
```
@@ -15,19 +15,37 @@ use bytes::{BufMut, Bytes, BytesMut};
/// A state of a tenant in pageserver's memory.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum TenantState {
    /// Tenant is fully operational, its background jobs might be running or not.
    Active { background_jobs_running: bool },
    /// A tenant is recognized by pageserver, but not yet ready to operate:
    /// e.g. not present locally and being downloaded or being read into memory from the file system.
    Paused,
    /// A tenant is recognized by the pageserver, but no longer used for any operations, as failed to get activated.
    // This tenant is being loaded from local disk
    Loading,
    // This tenant is being downloaded from cloud storage.
    Attaching,
    /// Tenant is fully operational
    Active,
    /// A tenant is recognized by pageserver, but it is being detached or the
    /// system is being shut down.
    Stopping,
    /// A tenant is recognized by the pageserver, but can no longer be used for
    /// any operations, because it failed to be activated.
    Broken,
}

impl TenantState {
    pub fn has_in_progress_downloads(&self) -> bool {
        match self {
            Self::Loading => true,
            Self::Attaching => true,
            Self::Active => false,
            Self::Stopping => false,
            Self::Broken => false,
        }
    }
}

/// A state of a timeline in pageserver's memory.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum TimelineState {
    /// Timeline is fully operational, its background jobs are running.
    /// Timeline is fully operational. If the containing Tenant is Active, the timeline's
    /// background jobs are running otherwise they will be launched when the tenant is activated.
    Active,
    /// A timeline is recognized by pageserver, but not yet ready to operate.
    /// The status indicates, that the timeline could eventually go back to Active automatically:
@@ -35,8 +53,9 @@ pub enum TimelineState {
    Suspended,
    /// A timeline is recognized by pageserver, but not yet ready to operate and not allowed to
    /// automatically become Active after certain events: only a management call can change this status.
    Paused,
    /// A timeline is recognized by the pageserver, but no longer used for any operations, as failed to get activated.
    Stopping,
    /// A timeline is recognized by the pageserver, but can no longer be used for
    /// any operations, because it failed to be activated.
    Broken,
}

@@ -168,6 +187,8 @@ pub struct TimelineInfo {
    pub latest_gc_cutoff_lsn: Lsn,
    #[serde_as(as = "DisplayFromStr")]
    pub disk_consistent_lsn: Lsn,
    #[serde_as(as = "DisplayFromStr")]
    pub remote_consistent_lsn: Lsn,
    pub current_logical_size: Option<u64>, // is None when timeline is Unloaded
    pub current_physical_size: Option<u64>, // is None when timeline is Unloaded
    pub current_logical_size_non_incremental: Option<u64>,
@@ -180,8 +201,6 @@ pub struct TimelineInfo {
    pub last_received_msg_ts: Option<u128>,
    pub pg_version: u32,

    #[serde_as(as = "Option<DisplayFromStr>")]
    pub remote_consistent_lsn: Option<Lsn>,
    pub awaits_download: bool,

    pub state: TimelineState,

17 libs/postgres_connection/Cargo.toml Normal file
@@ -0,0 +1,17 @@
[package]
name = "postgres_connection"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
anyhow = "1.0"
itertools = "0.10.3"
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev = "d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
url = "2.2.2"
workspace_hack = { version = "0.1", path = "../../workspace_hack" }

[dev-dependencies]
once_cell = "1.13.0"
253 libs/postgres_connection/src/lib.rs Normal file
@@ -0,0 +1,253 @@
use anyhow::{bail, Context};
|
||||
use itertools::Itertools;
|
||||
use std::borrow::Cow;
|
||||
use std::fmt;
|
||||
use url::Host;
|
||||
|
||||
/// Parses a string of format either `host:port` or `host` into a corresponding pair.
|
||||
/// The `host` part should be a correct `url::Host`, while `port` (if present) should be
|
||||
/// a valid decimal u16 of digits only.
|
||||
pub fn parse_host_port<S: AsRef<str>>(host_port: S) -> Result<(Host, Option<u16>), anyhow::Error> {
|
||||
let (host, port) = match host_port.as_ref().rsplit_once(':') {
|
||||
Some((host, port)) => (
|
||||
host,
|
||||
// +80 is a valid u16, but not a valid port
|
||||
if port.chars().all(|c| c.is_ascii_digit()) {
|
||||
Some(port.parse::<u16>().context("Unable to parse port")?)
|
||||
} else {
|
||||
bail!("Port contains a non-ascii-digit")
|
||||
},
|
||||
),
|
||||
None => (host_port.as_ref(), None), // No colons, no port specified
|
||||
};
|
||||
let host = Host::parse(host).context("Unable to parse host")?;
|
||||
Ok((host, port))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests_parse_host_port {
|
||||
use crate::parse_host_port;
|
||||
use url::Host;
|
||||
|
||||
#[test]
|
||||
fn test_normal() {
|
||||
let (host, port) = parse_host_port("hello:123").unwrap();
|
||||
assert_eq!(host, Host::Domain("hello".to_owned()));
|
||||
assert_eq!(port, Some(123));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_no_port() {
|
||||
let (host, port) = parse_host_port("hello").unwrap();
|
||||
assert_eq!(host, Host::Domain("hello".to_owned()));
|
||||
assert_eq!(port, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ipv6() {
|
||||
let (host, port) = parse_host_port("[::1]:123").unwrap();
|
||||
assert_eq!(host, Host::<String>::Ipv6(std::net::Ipv6Addr::LOCALHOST));
|
||||
assert_eq!(port, Some(123));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_host() {
|
||||
assert!(parse_host_port("hello world").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_port() {
|
||||
assert!(parse_host_port("hello:+80").is_err());
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct PgConnectionConfig {
|
||||
host: Host,
|
||||
port: u16,
|
||||
password: Option<String>,
|
||||
options: Vec<String>,
|
||||
}
|
||||
|
||||
/// A simplified PostgreSQL connection configuration. Supports only a subset of possible
|
||||
/// settings for simplicity. A password getter or `to_connection_string` methods are not
|
||||
/// added by design to avoid accidentally leaking password through logging, command line
|
||||
/// arguments to a child process, or likewise.
|
||||
impl PgConnectionConfig {
|
||||
pub fn new_host_port(host: Host, port: u16) -> Self {
|
||||
PgConnectionConfig {
|
||||
host,
|
||||
port,
|
||||
password: None,
|
||||
options: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn host(&self) -> &Host {
|
||||
&self.host
|
||||
}
|
||||
|
||||
pub fn port(&self) -> u16 {
|
||||
self.port
|
||||
}
|
||||
|
||||
pub fn set_host(mut self, h: Host) -> Self {
|
||||
self.host = h;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn set_port(mut self, p: u16) -> Self {
|
||||
self.port = p;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn set_password(mut self, s: Option<String>) -> Self {
|
||||
self.password = s;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn extend_options<I: IntoIterator<Item = S>, S: Into<String>>(mut self, i: I) -> Self {
|
||||
self.options.extend(i.into_iter().map(|s| s.into()));
|
||||
self
|
||||
}
|
||||
|
||||
/// Return a `<host>:<port>` string.
|
||||
pub fn raw_address(&self) -> String {
|
||||
format!("{}:{}", self.host(), self.port())
|
||||
}
|
||||
|
||||
/// Build a client library-specific connection configuration.
|
||||
/// Used for testing and when we need to add some obscure configuration
|
||||
/// elements at the last moment.
|
||||
pub fn to_tokio_postgres_config(&self) -> tokio_postgres::Config {
|
||||
// Use `tokio_postgres::Config` instead of `postgres::Config` because
|
||||
// the former supports more options to fiddle with later.
|
||||
let mut config = tokio_postgres::Config::new();
|
||||
config.host(&self.host().to_string()).port(self.port);
|
||||
if let Some(password) = &self.password {
|
||||
config.password(password);
|
||||
}
|
||||
if !self.options.is_empty() {
|
||||
// These options are command-line options and should be escaped before being passed
|
||||
// as an 'options' connection string parameter, see
|
||||
// https://www.postgresql.org/docs/15/libpq-connect.html#LIBPQ-CONNECT-OPTIONS
|
||||
//
|
||||
// They will be space-separated, so each space inside an option should be escaped,
|
||||
// and all backslashes should be escaped before that. Although we don't expect options
|
||||
// with spaces at the moment, they're supported by PostgreSQL. Hence we support them
|
||||
// in this typesafe interface.
|
||||
//
|
||||
// We use `Cow` to avoid allocations in the best case (no escaping). A fully imperative
|
||||
// solution would require 1-2 allocations in the worst case as well, but it's harder to
|
||||
// implement and this function is hardly a bottleneck. The function is only called around
|
||||
// establishing a new connection.
|
||||
#[allow(unstable_name_collisions)]
|
||||
config.options(
|
||||
&self
|
||||
.options
|
||||
.iter()
|
||||
.map(|s| {
|
||||
if s.contains(['\\', ' ']) {
|
||||
Cow::Owned(s.replace('\\', "\\\\").replace(' ', "\\ "))
|
||||
} else {
|
||||
Cow::Borrowed(s.as_str())
|
||||
}
|
||||
})
|
||||
.intersperse(Cow::Borrowed(" ")) // TODO: use impl from std once it's stabilized
|
||||
.collect::<String>(),
|
||||
);
|
||||
}
|
||||
config
|
||||
}
|
||||
|
||||
/// Connect using postgres protocol with TLS disabled.
|
||||
pub fn connect_no_tls(&self) -> Result<postgres::Client, postgres::Error> {
|
||||
postgres::Config::from(self.to_tokio_postgres_config()).connect(postgres::NoTls)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for PgConnectionConfig {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
// We want `password: Some(REDACTED-STRING)`, not `password: Some("REDACTED-STRING")`
|
||||
// so even if the password is `REDACTED-STRING` (quite unlikely) there is no confusion.
|
||||
// Hence `format_args!()`, it returns a "safe" string which is not escaped by `Debug`.
|
||||
f.debug_struct("PgConnectionConfig")
|
||||
.field("host", &self.host)
|
||||
.field("port", &self.port)
|
||||
.field(
|
||||
"password",
|
||||
&self
|
||||
.password
|
||||
.as_ref()
|
||||
.map(|_| format_args!("REDACTED-STRING")),
|
||||
)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests_pg_connection_config {
|
||||
use crate::PgConnectionConfig;
|
||||
use once_cell::sync::Lazy;
|
||||
use url::Host;
|
||||
|
||||
static STUB_HOST: Lazy<Host> = Lazy::new(|| Host::Domain("stub.host.example".to_owned()));
|
||||
|
||||
#[test]
|
||||
fn test_no_password() {
|
||||
let cfg = PgConnectionConfig::new_host_port(STUB_HOST.clone(), 123);
|
||||
assert_eq!(cfg.host(), &*STUB_HOST);
|
||||
assert_eq!(cfg.port(), 123);
|
||||
assert_eq!(cfg.raw_address(), "stub.host.example:123");
|
||||
assert_eq!(
|
||||
format!("{:?}", cfg),
|
||||
"PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: None }"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ipv6() {
|
||||
// May be a special case because hostname contains a colon.
|
||||
let cfg = PgConnectionConfig::new_host_port(Host::parse("[::1]").unwrap(), 123);
|
||||
assert_eq!(
|
||||
cfg.host(),
|
||||
&Host::<String>::Ipv6(std::net::Ipv6Addr::LOCALHOST)
|
||||
);
|
||||
assert_eq!(cfg.port(), 123);
|
||||
assert_eq!(cfg.raw_address(), "[::1]:123");
|
||||
assert_eq!(
|
||||
format!("{:?}", cfg),
|
||||
"PgConnectionConfig { host: Ipv6(::1), port: 123, password: None }"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_password() {
|
||||
let cfg = PgConnectionConfig::new_host_port(STUB_HOST.clone(), 123)
|
||||
.set_password(Some("password".to_owned()));
|
||||
assert_eq!(cfg.host(), &*STUB_HOST);
|
||||
assert_eq!(cfg.port(), 123);
|
||||
assert_eq!(cfg.raw_address(), "stub.host.example:123");
|
||||
assert_eq!(
|
||||
format!("{:?}", cfg),
|
||||
"PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: Some(REDACTED-STRING) }"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_options() {
|
||||
let cfg = PgConnectionConfig::new_host_port(STUB_HOST.clone(), 123).extend_options([
|
||||
"hello",
|
||||
"world",
|
||||
"with space",
|
||||
"and \\ backslashes",
|
||||
]);
|
||||
assert_eq!(cfg.host(), &*STUB_HOST);
|
||||
assert_eq!(cfg.port(), 123);
|
||||
assert_eq!(cfg.raw_address(), "stub.host.example:123");
|
||||
assert_eq!(
|
||||
cfg.to_tokio_postgres_config().get_options(),
|
||||
Some("hello world with\\ space and\\ \\\\\\ backslashes")
|
||||
);
|
||||
}
|
||||
}
|
||||
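The crate above is meant to be used builder-style. A minimal usage sketch follows; the host, port, password and options are made-up values, only the method names come from the code shown above.

```rust
use postgres_connection::{parse_host_port, PgConnectionConfig};

fn main() -> anyhow::Result<()> {
    // The "host:port" string would normally come from a node's listen address.
    let (host, port) = parse_host_port("safekeeper-0.local:5454")?;

    let config = PgConnectionConfig::new_host_port(host, port.unwrap_or(5432))
        .set_password(Some("placeholder-password".to_owned()))
        .extend_options(["-c", "application_name=sketch"]);

    // `Debug` redacts the password on purpose, so this is safe to log.
    println!("connecting to {} ({config:?})", config.raw_address());

    // Blocking connection; `to_tokio_postgres_config()` is the async entry point.
    let _client = config.connect_no_tls()?;
    Ok(())
}
```

Note the deliberate absence of a password getter and of a `to_connection_string` helper: the secret can only leave the struct through the client configuration itself, never through logging or child-process arguments.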
@@ -163,6 +163,27 @@ pub fn page_set_lsn(pg: &mut [u8], lsn: Lsn) {
    pg[4..8].copy_from_slice(&(lsn.0 as u32).to_le_bytes());
}

// This is a port of the function with the same name from freespace.c.
// The only difference is that it does not have a "level" parameter, because
// XLogRecordPageWithFreeSpace always calls it with level=FSM_BOTTOM_LEVEL.
pub fn fsm_logical_to_physical(addr: BlockNumber) -> BlockNumber {
    let mut leafno = addr;
    const FSM_TREE_DEPTH: u32 = if pg_constants::SLOTS_PER_FSM_PAGE >= 1626 {
        3
    } else {
        4
    };

    /* Count upper level nodes required to address the leaf page */
    let mut pages: BlockNumber = 0;
    for _l in 0..FSM_TREE_DEPTH {
        pages += leafno + 1;
        leafno /= pg_constants::SLOTS_PER_FSM_PAGE;
    }
    /* Turn the page count into 0-based block number */
    pages - 1
}

pub mod waldecoder {

    use crate::{v14, v15};

@@ -197,6 +197,16 @@ pub const XLOG_CHECKPOINT_SHUTDOWN: u8 = 0x00;
pub const XLOG_CHECKPOINT_ONLINE: u8 = 0x10;
pub const XLP_LONG_HEADER: u16 = 0x0002;

/* From fsm_internals.h */
const FSM_NODES_PER_PAGE: usize = BLCKSZ as usize - SIZEOF_PAGE_HEADER_DATA - 4;
const FSM_NON_LEAF_NODES_PER_PAGE: usize = BLCKSZ as usize / 2 - 1;
const FSM_LEAF_NODES_PER_PAGE: usize = FSM_NODES_PER_PAGE - FSM_NON_LEAF_NODES_PER_PAGE;
pub const SLOTS_PER_FSM_PAGE: u32 = FSM_LEAF_NODES_PER_PAGE as u32;

/* From visibilitymap.c */
pub const VM_HEAPBLOCKS_PER_PAGE: u32 =
    (BLCKSZ as usize - SIZEOF_PAGE_HEADER_DATA) as u32 * (8 / 2); // MAPSIZE * (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

// List of subdirectories inside pgdata.
// Copied from src/bin/initdb/initdb.c
pub const PGDATA_SUBDIRS: [&str; 22] = [

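To make the address arithmetic above concrete, here is a self-contained sketch of the same mapping with the constants folded in. The value 4069 is `SLOTS_PER_FSM_PAGE` worked out by hand from the constants shown above (BLCKSZ = 8192, 24-byte page header); treat the exact numbers as illustrative.

```rust
// Same mapping as `fsm_logical_to_physical` above, with assumed constants:
// BLCKSZ = 8192 and a 24-byte page header give SLOTS_PER_FSM_PAGE = 4069,
// hence a tree depth of 3 (4069 >= 1626).
const SLOTS_PER_FSM_PAGE: u32 = 4069;
const FSM_TREE_DEPTH: u32 = 3;

fn fsm_logical_to_physical(addr: u32) -> u32 {
    // `addr` is the logical page number at the bottom (leaf) level.
    let mut leafno = addr;
    let mut pages = 0;
    for _ in 0..FSM_TREE_DEPTH {
        pages += leafno + 1;
        leafno /= SLOTS_PER_FSM_PAGE;
    }
    pages - 1
}

fn main() {
    // Blocks 0 and 1 hold the root and the first mid-level page,
    // so the first leaf page lands at physical block 2.
    assert_eq!(fsm_logical_to_physical(0), 2);
    assert_eq!(fsm_logical_to_physical(1), 3);
    // After 4069 leaves, a second mid-level page is interposed (block 4071).
    assert_eq!(fsm_logical_to_physical(4068), 4070);
    assert_eq!(fsm_logical_to_physical(4069), 4072);
    println!("all FSM address checks passed");
}
```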
@@ -9,8 +9,11 @@ async-trait = "0.1"
|
||||
metrics = { version = "0.1", path = "../metrics" }
|
||||
utils = { version = "0.1", path = "../utils" }
|
||||
once_cell = "1.13.0"
|
||||
rusoto_core = "0.48"
|
||||
rusoto_s3 = "0.48"
|
||||
aws-smithy-http = "0.51.0"
|
||||
aws-types = "0.51.0"
|
||||
aws-config = { version = "0.51.0", default-features = false, features=["rustls"] }
|
||||
aws-sdk-s3 = "0.21.0"
|
||||
hyper = { version = "0.14", features = ["stream"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
tokio = { version = "1.17", features = ["sync", "macros", "fs", "io-util"] }
|
||||
|
||||
@@ -4,27 +4,36 @@
|
||||
//! allowing multiple api users to independently work with the same S3 bucket, if
|
||||
//! their bucket prefixes are both specified and different.
|
||||
|
||||
use std::env::var;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use rusoto_core::{
|
||||
credential::{InstanceMetadataProvider, StaticProvider},
|
||||
HttpClient, Region, RusotoError,
|
||||
use aws_config::{
|
||||
environment::credentials::EnvironmentVariableCredentialsProvider, imds,
|
||||
imds::credentials::ImdsCredentialsProvider, meta::credentials::provide_credentials_fn,
|
||||
};
|
||||
use rusoto_s3::{
|
||||
DeleteObjectRequest, GetObjectError, GetObjectRequest, ListObjectsV2Request, PutObjectRequest,
|
||||
S3Client, StreamingBody, S3,
|
||||
use aws_sdk_s3::{
|
||||
config::Config,
|
||||
error::{GetObjectError, GetObjectErrorKind},
|
||||
types::{ByteStream, SdkError},
|
||||
Client, Endpoint, Region,
|
||||
};
|
||||
use aws_smithy_http::body::SdkBody;
|
||||
use aws_types::credentials::{CredentialsError, ProvideCredentials};
|
||||
use hyper::Body;
|
||||
use tokio::{io, sync::Semaphore};
|
||||
use tokio_util::io::ReaderStream;
|
||||
use tracing::debug;
|
||||
|
||||
use super::StorageMetadata;
|
||||
use crate::{
|
||||
strip_path_prefix, Download, DownloadError, RemoteObjectId, RemoteStorage, S3Config,
|
||||
REMOTE_STORAGE_PREFIX_SEPARATOR,
|
||||
};
|
||||
|
||||
use super::StorageMetadata;
|
||||
const DEFAULT_IMDS_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
|
||||
pub(super) mod metrics {
|
||||
use metrics::{register_int_counter_vec, IntCounterVec};
|
||||
@@ -116,7 +125,7 @@ fn download_destination(
|
||||
/// AWS S3 storage.
|
||||
pub struct S3Bucket {
|
||||
workdir: PathBuf,
|
||||
client: S3Client,
|
||||
client: Client,
|
||||
bucket_name: String,
|
||||
prefix_in_bucket: Option<String>,
|
||||
// Every request to S3 can be throttled or cancelled, if a certain number of requests per second is exceeded.
|
||||
@@ -125,6 +134,12 @@ pub struct S3Bucket {
|
||||
concurrency_limiter: Semaphore,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct GetObjectRequest {
|
||||
bucket: String,
|
||||
key: String,
|
||||
range: Option<String>,
|
||||
}
|
||||
impl S3Bucket {
|
||||
/// Creates the S3 storage, errors if incorrect AWS S3 configuration provided.
|
||||
pub fn new(aws_config: &S3Config, workdir: PathBuf) -> anyhow::Result<Self> {
|
||||
@@ -132,43 +147,40 @@ impl S3Bucket {
|
||||
"Creating s3 remote storage for S3 bucket {}",
|
||||
aws_config.bucket_name
|
||||
);
|
||||
let region = match aws_config.endpoint.clone() {
|
||||
Some(custom_endpoint) => Region::Custom {
|
||||
name: aws_config.bucket_region.clone(),
|
||||
endpoint: custom_endpoint,
|
||||
},
|
||||
None => aws_config
|
||||
.bucket_region
|
||||
.parse::<Region>()
|
||||
.context("Failed to parse the s3 region from config")?,
|
||||
};
|
||||
let request_dispatcher = HttpClient::new().context("Failed to create S3 http client")?;
|
||||
let mut config_builder = Config::builder()
|
||||
.region(Region::new(aws_config.bucket_region.clone()))
|
||||
.credentials_provider(provide_credentials_fn(|| async {
|
||||
match var("AWS_ACCESS_KEY_ID").is_ok() && var("AWS_SECRET_ACCESS_KEY").is_ok() {
|
||||
true => {
|
||||
EnvironmentVariableCredentialsProvider::new()
|
||||
.provide_credentials()
|
||||
.await
|
||||
}
|
||||
false => {
|
||||
let imds_client = imds::Client::builder()
|
||||
.connect_timeout(DEFAULT_IMDS_TIMEOUT)
|
||||
.read_timeout(DEFAULT_IMDS_TIMEOUT)
|
||||
.build()
|
||||
.await
|
||||
.map_err(CredentialsError::unhandled)?;
|
||||
ImdsCredentialsProvider::builder()
|
||||
.imds_client(imds_client)
|
||||
.build()
|
||||
.provide_credentials()
|
||||
.await
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
let access_key_id = std::env::var("AWS_ACCESS_KEY_ID").ok();
|
||||
let secret_access_key = std::env::var("AWS_SECRET_ACCESS_KEY").ok();
|
||||
// session token is used when authorizing through sso
|
||||
// which is typically the case when testing locally on developer machine
|
||||
let session_token = std::env::var("AWS_SESSION_TOKEN").ok();
|
||||
|
||||
let client = if access_key_id.is_none() && secret_access_key.is_none() {
|
||||
debug!("Using IAM-based AWS access");
|
||||
S3Client::new_with(request_dispatcher, InstanceMetadataProvider::new(), region)
|
||||
} else {
|
||||
debug!(
|
||||
"Using credentials-based AWS access. Session token is set: {}",
|
||||
session_token.is_some()
|
||||
if let Some(custom_endpoint) = aws_config.endpoint.clone() {
|
||||
let endpoint = Endpoint::immutable(
|
||||
custom_endpoint
|
||||
.parse()
|
||||
.expect("Failed to parse S3 custom endpoint"),
|
||||
);
|
||||
S3Client::new_with(
|
||||
request_dispatcher,
|
||||
StaticProvider::new(
|
||||
access_key_id.unwrap_or_default(),
|
||||
secret_access_key.unwrap_or_default(),
|
||||
session_token,
|
||||
None,
|
||||
),
|
||||
region,
|
||||
)
|
||||
};
|
||||
config_builder.set_endpoint_resolver(Some(Arc::new(endpoint)));
|
||||
}
|
||||
let client = Client::from_conf(config_builder.build());
|
||||
|
||||
let prefix_in_bucket = aws_config.prefix_in_bucket.as_deref().map(|prefix| {
|
||||
let mut prefix = prefix;
|
||||
@@ -182,7 +194,6 @@ impl S3Bucket {
|
||||
}
|
||||
prefix
|
||||
});
|
||||
|
||||
Ok(Self {
|
||||
client,
|
||||
workdir,
|
||||
@@ -202,20 +213,33 @@ impl S3Bucket {
|
||||
|
||||
metrics::inc_get_object();
|
||||
|
||||
match self.client.get_object(request).await {
|
||||
Ok(object_output) => match object_output.body {
|
||||
None => {
|
||||
metrics::inc_get_object_fail();
|
||||
Err(DownloadError::Other(anyhow::anyhow!(
|
||||
"Got no body for the S3 object given"
|
||||
)))
|
||||
}
|
||||
Some(body) => Ok(Download {
|
||||
metadata: object_output.metadata.map(StorageMetadata),
|
||||
download_stream: Box::pin(io::BufReader::new(body.into_async_read())),
|
||||
}),
|
||||
},
|
||||
Err(RusotoError::Service(GetObjectError::NoSuchKey(_))) => Err(DownloadError::NotFound),
|
||||
let get_object = self
|
||||
.client
|
||||
.get_object()
|
||||
.bucket(request.bucket)
|
||||
.key(request.key)
|
||||
.set_range(request.range)
|
||||
.send()
|
||||
.await;
|
||||
|
||||
match get_object {
|
||||
Ok(object_output) => {
|
||||
let metadata = object_output.metadata().cloned().map(StorageMetadata);
|
||||
Ok(Download {
|
||||
metadata,
|
||||
download_stream: Box::pin(io::BufReader::new(
|
||||
object_output.body.into_async_read(),
|
||||
)),
|
||||
})
|
||||
}
|
||||
Err(SdkError::ServiceError {
|
||||
err:
|
||||
GetObjectError {
|
||||
kind: GetObjectErrorKind::NoSuchKey(..),
|
||||
..
|
||||
},
|
||||
..
|
||||
}) => Err(DownloadError::NotFound),
|
||||
Err(e) => {
|
||||
metrics::inc_get_object_fail();
|
||||
Err(DownloadError::Other(anyhow::anyhow!(
|
||||
@@ -261,12 +285,11 @@ impl RemoteStorage for S3Bucket {
|
||||
|
||||
let fetch_response = self
|
||||
.client
|
||||
.list_objects_v2(ListObjectsV2Request {
|
||||
bucket: self.bucket_name.clone(),
|
||||
prefix: self.prefix_in_bucket.clone(),
|
||||
continuation_token,
|
||||
..ListObjectsV2Request::default()
|
||||
})
|
||||
.list_objects_v2()
|
||||
.bucket(self.bucket_name.clone())
|
||||
.set_prefix(self.prefix_in_bucket.clone())
|
||||
.set_continuation_token(continuation_token)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
metrics::inc_list_objects_fail();
|
||||
@@ -322,13 +345,12 @@ impl RemoteStorage for S3Bucket {
|
||||
|
||||
let fetch_response = self
|
||||
.client
|
||||
.list_objects_v2(ListObjectsV2Request {
|
||||
bucket: self.bucket_name.clone(),
|
||||
prefix: list_prefix.clone(),
|
||||
continuation_token,
|
||||
delimiter: Some(REMOTE_STORAGE_PREFIX_SEPARATOR.to_string()),
|
||||
..ListObjectsV2Request::default()
|
||||
})
|
||||
.list_objects_v2()
|
||||
.bucket(self.bucket_name.clone())
|
||||
.set_prefix(list_prefix.clone())
|
||||
.set_continuation_token(continuation_token)
|
||||
.delimiter(REMOTE_STORAGE_PREFIX_SEPARATOR.to_string())
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
metrics::inc_list_objects_fail();
|
||||
@@ -366,17 +388,18 @@ impl RemoteStorage for S3Bucket {
|
||||
.context("Concurrency limiter semaphore got closed during S3 upload")?;
|
||||
|
||||
metrics::inc_put_object();
|
||||
|
||||
let body = Body::wrap_stream(ReaderStream::new(from));
|
||||
let bytes_stream = ByteStream::new(SdkBody::from(body));
|
||||
|
||||
self.client
|
||||
.put_object(PutObjectRequest {
|
||||
body: Some(StreamingBody::new_with_size(
|
||||
ReaderStream::new(from),
|
||||
from_size_bytes,
|
||||
)),
|
||||
bucket: self.bucket_name.clone(),
|
||||
key: to.0.to_owned(),
|
||||
metadata: metadata.map(|m| m.0),
|
||||
..PutObjectRequest::default()
|
||||
})
|
||||
.put_object()
|
||||
.bucket(self.bucket_name.clone())
|
||||
.key(to.0.to_owned())
|
||||
.set_metadata(metadata.map(|m| m.0))
|
||||
.content_length(from_size_bytes.try_into()?)
|
||||
.body(bytes_stream)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
metrics::inc_put_object_fail();
|
||||
@@ -412,7 +435,6 @@ impl RemoteStorage for S3Bucket {
|
||||
bucket: self.bucket_name.clone(),
|
||||
key: from.0.to_owned(),
|
||||
range,
|
||||
..GetObjectRequest::default()
|
||||
})
|
||||
.await
|
||||
}
|
||||
@@ -427,11 +449,10 @@ impl RemoteStorage for S3Bucket {
|
||||
metrics::inc_delete_object();
|
||||
|
||||
self.client
|
||||
.delete_object(DeleteObjectRequest {
|
||||
bucket: self.bucket_name.clone(),
|
||||
key: remote_object_id.0.to_owned(),
|
||||
..DeleteObjectRequest::default()
|
||||
})
|
||||
.delete_object()
|
||||
.bucket(self.bucket_name.clone())
|
||||
.key(remote_object_id.0.to_owned())
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
metrics::inc_delete_object_fail();
|
||||
@@ -600,7 +621,7 @@ mod tests {
|
||||
fn dummy_storage(workdir: PathBuf) -> S3Bucket {
|
||||
S3Bucket {
|
||||
workdir,
|
||||
client: S3Client::new("us-east-1".parse().unwrap()),
|
||||
client: Client::new(&aws_config::SdkConfig::builder().build()),
|
||||
bucket_name: "dummy-bucket".to_string(),
|
||||
prefix_in_bucket: Some("dummy_prefix/".to_string()),
|
||||
concurrency_limiter: Semaphore::new(1),
|
||||
|
||||
@@ -7,7 +7,7 @@ use serde;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use anyhow::Result;
|
||||
use jsonwebtoken::{
|
||||
decode, encode, Algorithm, DecodingKey, EncodingKey, Header, TokenData, Validation,
|
||||
};
|
||||
@@ -21,8 +21,16 @@ const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum Scope {
|
||||
// Provides access to all data for a specific tenant (specified in `struct Claims` below)
|
||||
// TODO: join these two?
|
||||
Tenant,
|
||||
// Provides blanket access to all tenants on the pageserver plus pageserver-wide APIs.
|
||||
// Should only be used e.g. for status check/tenant creation/list.
|
||||
PageServerApi,
|
||||
// Provides blanket access to all data on the safekeeper plus safekeeper-wide APIs.
|
||||
// Should only be used e.g. for status check.
|
||||
// Currently also used for connection from any pageserver to any safekeeper.
|
||||
SafekeeperData,
|
||||
}
|
||||
|
||||
#[serde_as]
|
||||
@@ -40,22 +48,6 @@ impl Claims {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_permission(claims: &Claims, tenant_id: Option<TenantId>) -> Result<()> {
|
||||
match (&claims.scope, tenant_id) {
|
||||
(Scope::Tenant, None) => {
|
||||
bail!("Attempt to access management api with tenant scope. Permission denied")
|
||||
}
|
||||
(Scope::Tenant, Some(tenant_id)) => {
|
||||
if claims.tenant_id.unwrap() != tenant_id {
|
||||
bail!("Tenant id mismatch. Permission denied")
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
(Scope::PageServerApi, None) => Ok(()), // access to management api for PageServerApi scope
|
||||
(Scope::PageServerApi, Some(_)) => Ok(()), // access to tenant api using PageServerApi scope
|
||||
}
|
||||
}
|
||||
|
||||
pub struct JwtAuth {
|
||||
decoding_key: DecodingKey,
|
||||
validation: Validation,
|
||||
|
||||
45 libs/utils/src/fs_ext.rs Normal file
@@ -0,0 +1,45 @@
/// Extensions to `std::fs` types.
|
||||
use std::{fs, io, path::Path};
|
||||
|
||||
pub trait PathExt {
|
||||
/// Returns an error if `self` is not a directory.
|
||||
fn is_empty_dir(&self) -> io::Result<bool>;
|
||||
}
|
||||
|
||||
impl<P> PathExt for P
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
fn is_empty_dir(&self) -> io::Result<bool> {
|
||||
Ok(fs::read_dir(self)?.into_iter().next().is_none())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[test]
|
||||
fn is_empty_dir() {
|
||||
use super::PathExt;
|
||||
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let dir_path = dir.path();
|
||||
|
||||
// test positive case
|
||||
assert!(
|
||||
dir_path.is_empty_dir().expect("test failure"),
|
||||
"new tempdir should be empty"
|
||||
);
|
||||
|
||||
// invoke on a file to ensure it returns an error
|
||||
let file_path: PathBuf = dir_path.join("testfile");
|
||||
let f = std::fs::File::create(&file_path).unwrap();
|
||||
drop(f);
|
||||
assert!(file_path.is_empty_dir().is_err());
|
||||
|
||||
// do it again on a path, we know to be nonexistent
|
||||
std::fs::remove_file(&file_path).unwrap();
|
||||
assert!(file_path.is_empty_dir().is_err());
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,5 @@
|
||||
use crate::auth::{self, Claims, JwtAuth};
|
||||
use crate::auth::{Claims, JwtAuth};
|
||||
use crate::http::error;
|
||||
use crate::id::TenantId;
|
||||
use anyhow::anyhow;
|
||||
use hyper::header::AUTHORIZATION;
|
||||
use hyper::{header::CONTENT_TYPE, Body, Request, Response, Server};
|
||||
@@ -144,10 +143,14 @@ pub fn auth_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn check_permission(req: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
|
||||
pub fn check_permission_with(
|
||||
req: &Request<Body>,
|
||||
check_permission: impl Fn(&Claims) -> Result<(), anyhow::Error>,
|
||||
) -> Result<(), ApiError> {
|
||||
match req.context::<Claims>() {
|
||||
Some(claims) => Ok(auth::check_permission(&claims, tenant_id)
|
||||
.map_err(|err| ApiError::Forbidden(err.to_string()))?),
|
||||
Some(claims) => {
|
||||
Ok(check_permission(&claims).map_err(|err| ApiError::Forbidden(err.to_string()))?)
|
||||
}
|
||||
None => Ok(()), // claims is None because auth is disabled
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,13 @@ use std::{fmt, str::FromStr};
|
||||
use hex::FromHex;
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum IdError {
|
||||
#[error("invalid id length {0}")]
|
||||
SliceParseError(usize),
|
||||
}
|
||||
|
||||
/// Neon ID is a 128-bit random ID.
|
||||
/// Used to represent various identifiers. Provides handy utility methods and impls.
|
||||
@@ -22,6 +29,15 @@ impl Id {
|
||||
Id::from(arr)
|
||||
}
|
||||
|
||||
pub fn from_slice(src: &[u8]) -> Result<Id, IdError> {
|
||||
if src.len() != 16 {
|
||||
return Err(IdError::SliceParseError(src.len()));
|
||||
}
|
||||
let mut id_array = [0u8; 16];
|
||||
id_array.copy_from_slice(src);
|
||||
Ok(id_array.into())
|
||||
}
|
||||
|
||||
pub fn as_arr(&self) -> [u8; 16] {
|
||||
self.0
|
||||
}
|
||||
@@ -100,6 +116,10 @@ macro_rules! id_newtype {
|
||||
$t(Id::get_from_buf(buf))
|
||||
}
|
||||
|
||||
pub fn from_slice(src: &[u8]) -> Result<$t, IdError> {
|
||||
Ok($t(Id::from_slice(src)?))
|
||||
}
|
||||
|
||||
pub fn as_arr(&self) -> [u8; 16] {
|
||||
self.0.as_arr()
|
||||
}
|
||||
|
||||
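A small sketch of the round-trip the new slice helpers are meant to support. It assumes `TenantId` is one of the `id_newtype!` instantiations and that `generate()` is available on it, mirroring the `TimelineId::generate` call used elsewhere in this change.

```rust
use utils::id::{IdError, TenantId};

fn main() -> Result<(), IdError> {
    let tenant = TenantId::generate();

    // `as_arr` exposes the raw 16 bytes; `from_slice` re-validates the length.
    let bytes: [u8; 16] = tenant.as_arr();
    let parsed = TenantId::from_slice(&bytes)?;
    assert_eq!(parsed, tenant);

    // Anything that is not exactly 16 bytes is rejected with SliceParseError.
    assert!(TenantId::from_slice(&bytes[..8]).is_err());
    Ok(())
}
```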
@@ -48,6 +48,8 @@ pub mod nonblock;
|
||||
// Default signal handling
|
||||
pub mod signals;
|
||||
|
||||
pub mod fs_ext;
|
||||
|
||||
/// use with fail::cfg("$name", "return(2000)")
|
||||
#[macro_export]
|
||||
macro_rules! failpoint_sleep_millis_async {
|
||||
|
||||
@@ -35,6 +35,7 @@ itertools = "0.10.3"
|
||||
nix = "0.25"
|
||||
num-traits = "0.2.15"
|
||||
once_cell = "1.13.0"
|
||||
pin-project-lite = "0.2.7"
|
||||
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
||||
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
||||
postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
||||
@@ -61,12 +62,15 @@ walkdir = "2.3.2"
|
||||
etcd_broker = { path = "../libs/etcd_broker" }
|
||||
metrics = { path = "../libs/metrics" }
|
||||
pageserver_api = { path = "../libs/pageserver_api" }
|
||||
postgres_connection = { path = "../libs/postgres_connection" }
|
||||
postgres_ffi = { path = "../libs/postgres_ffi" }
|
||||
pq_proto = { path = "../libs/pq_proto" }
|
||||
remote_storage = { path = "../libs/remote_storage" }
|
||||
tenant_size_model = { path = "../libs/tenant_size_model" }
|
||||
utils = { path = "../libs/utils" }
|
||||
workspace_hack = { version = "0.1", path = "../workspace_hack" }
|
||||
rpds = "0.12.0"
|
||||
im = "15.1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = "0.4"
|
||||
|
||||
12 pageserver/benches/README.md Normal file
@@ -0,0 +1,12 @@
# Pageserver Benchmarks

## How to run

To run all benchmarks:
`cargo bench`

To run a specific file:
`cargo bench --bench bench_layer_map`

To run a specific function:
`cargo bench --bench bench_layer_map -- real_map_uniform_queries`
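These commands assume a Criterion harness (the `criterion = "0.4"` dev-dependency added to `pageserver/Cargo.toml` earlier in this diff). A stripped-down skeleton of such a bench file is sketched below; the benchmarked closure is a stand-in, not the real layer map code, and registering the file requires the usual `[[bench]]` entry with `harness = false` in `Cargo.toml`.

```rust
use criterion::{black_box, criterion_group, criterion_main, Criterion};

// Stand-in workload: the real benchmark drives the pageserver layer map
// using the layer-name fixtures (*.txt) that live next to this README.
fn lookup(keys: &[u64], needle: u64) -> Option<usize> {
    keys.iter().position(|&k| k == needle)
}

fn real_map_uniform_queries(c: &mut Criterion) {
    let keys: Vec<u64> = (0..10_000).collect();
    c.bench_function("real_map_uniform_queries", |b| {
        b.iter(|| lookup(black_box(&keys), black_box(9_999)))
    });
}

criterion_group!(benches, real_map_uniform_queries);
criterion_main!(benches);
```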
File diff suppressed because it is too large
5651 pageserver/benches/large-layer-map-layernames.txt Normal file
File diff suppressed because it is too large
26690 pageserver/benches/odd-brook-layernames.txt Normal file
File diff suppressed because it is too large
22 pageserver/src/auth.rs Normal file
@@ -0,0 +1,22 @@
use anyhow::{bail, Result};
use utils::auth::{Claims, Scope};
use utils::id::TenantId;

pub fn check_permission(claims: &Claims, tenant_id: Option<TenantId>) -> Result<()> {
    match (&claims.scope, tenant_id) {
        (Scope::Tenant, None) => {
            bail!("Attempt to access management api with tenant scope. Permission denied")
        }
        (Scope::Tenant, Some(tenant_id)) => {
            if claims.tenant_id.unwrap() != tenant_id {
                bail!("Tenant id mismatch. Permission denied")
            }
            Ok(())
        }
        (Scope::PageServerApi, None) => Ok(()), // access to management api for PageServerApi scope
        (Scope::PageServerApi, Some(_)) => Ok(()), // access to tenant api using PageServerApi scope
        (Scope::SafekeeperData, _) => {
            bail!("SafekeeperData scope makes no sense for Pageserver")
        }
    }
}
@@ -1,5 +1,7 @@
|
||||
//! Main entry point for the Page Server executable.
|
||||
|
||||
use std::env::{var, VarError};
|
||||
use std::sync::Arc;
|
||||
use std::{env, ops::ControlFlow, path::Path, str::FromStr};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
@@ -270,6 +272,23 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
|
||||
};
|
||||
info!("Using auth: {:#?}", conf.auth_type);
|
||||
|
||||
match var("ZENITH_AUTH_TOKEN") {
|
||||
Ok(v) => {
|
||||
info!("Loaded JWT token for authentication with Safekeeper");
|
||||
pageserver::config::SAFEKEEPER_AUTH_TOKEN
|
||||
.set(Arc::new(v))
|
||||
.map_err(|_| anyhow!("Could not initialize SAFEKEEPER_AUTH_TOKEN"))?;
|
||||
}
|
||||
Err(VarError::NotPresent) => {
|
||||
info!("No JWT token for authentication with Safekeeper detected");
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(e).with_context(|| {
|
||||
"Failed to either load to detect non-present ZENITH_AUTH_TOKEN environment variable"
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
let remote_storage = conf
|
||||
.remote_storage_config
|
||||
.as_ref()
|
||||
@@ -278,7 +297,7 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
|
||||
})
|
||||
.transpose()
|
||||
.context("Failed to init generic remote storage")?;
|
||||
let remote_index = {
|
||||
{
|
||||
let _rt_guard = BACKGROUND_RUNTIME.enter();
|
||||
tenant_mgr::init_tenant_mgr(conf, remote_storage.clone())?
|
||||
};
|
||||
@@ -290,7 +309,7 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
|
||||
{
|
||||
let _rt_guard = MGMT_REQUEST_RUNTIME.enter();
|
||||
|
||||
let router = http::make_router(conf, auth.clone(), remote_index, remote_storage)?;
|
||||
let router = http::make_router(conf, auth.clone(), remote_storage)?;
|
||||
let service =
|
||||
utils::http::RouterService::new(router.build().map_err(|err| anyhow!(err))?).unwrap();
|
||||
let server = hyper::Server::from_tcp(http_listener)?
|
||||
|
||||
@@ -10,9 +10,11 @@ use std::env;
|
||||
use utils::crashsafe::path_with_suffix_extension;
|
||||
use utils::id::ConnectionId;
|
||||
|
||||
use once_cell::sync::OnceCell;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use toml_edit;
|
||||
use toml_edit::{Document, Item};
|
||||
@@ -23,13 +25,9 @@ use utils::{
|
||||
postgres_backend::AuthType,
|
||||
};
|
||||
|
||||
use crate::tenant::TIMELINES_SEGMENT_NAME;
|
||||
use crate::tenant::{TENANT_ATTACHING_MARKER_FILENAME, TIMELINES_SEGMENT_NAME};
|
||||
use crate::tenant_config::{TenantConf, TenantConfOpt};
|
||||
|
||||
/// The name of the metadata file pageserver creates per timeline.
|
||||
pub const METADATA_FILE_NAME: &str = "metadata";
|
||||
pub const TIMELINE_UNINIT_MARK_SUFFIX: &str = "___uninit";
|
||||
const TENANT_CONFIG_NAME: &str = "config";
|
||||
use crate::{METADATA_FILE_NAME, TENANT_CONFIG_NAME, TIMELINE_UNINIT_MARK_SUFFIX};
|
||||
|
||||
pub mod defaults {
|
||||
use crate::tenant_config::defaults::*;
|
||||
@@ -145,6 +143,15 @@ pub struct PageServerConf {
|
||||
pub concurrent_tenant_size_logical_size_queries: ConfigurableSemaphore,
|
||||
}
|
||||
|
||||
/// We do not want to store this in a PageServerConf because the latter may be logged
|
||||
/// and/or serialized at a whim, while the token is secret. Currently this token is the
|
||||
/// same for accessing all tenants/timelines, but may become per-tenant/per-timeline in
|
||||
/// the future, more tokens and auth may arrive for etcd and/or its rewrite (see
|
||||
/// https://github.com/neondatabase/neon/issues/2394), completely changing the logic.
|
||||
/// Hence, we resort to a global variable for now instead of passing the token from the
|
||||
/// startup code to the connection code through a dozen layers.
|
||||
pub static SAFEKEEPER_AUTH_TOKEN: OnceCell<Arc<String>> = OnceCell::new();
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum ProfilingConfig {
|
||||
Disabled,
|
||||
@@ -390,6 +397,11 @@ impl PageServerConf {
|
||||
self.tenants_path().join(tenant_id.to_string())
|
||||
}
|
||||
|
||||
pub fn tenant_attaching_mark_file_path(&self, tenant_id: &TenantId) -> PathBuf {
|
||||
self.tenant_path(tenant_id)
|
||||
.join(TENANT_ATTACHING_MARKER_FILENAME)
|
||||
}
|
||||
|
||||
/// Points to a place in pageserver's local directory,
|
||||
/// where certain tenant's tenantconf file should be located.
|
||||
pub fn tenant_config_path(&self, tenant_id: TenantId) -> PathBuf {
|
||||
|
||||
@@ -604,13 +604,7 @@ components:
|
||||
id:
|
||||
type: string
|
||||
state:
|
||||
oneOf:
|
||||
- type: string
|
||||
- type: object
|
||||
properties:
|
||||
background_jobs_running:
|
||||
type: boolean
|
||||
|
||||
type: string
|
||||
current_physical_size:
|
||||
type: integer
|
||||
has_in_progress_downloads:
|
||||
|
||||
@@ -3,31 +3,29 @@ use std::sync::Arc;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use hyper::StatusCode;
|
||||
use hyper::{Body, Request, Response, Uri};
|
||||
use pageserver_api::models::TenantState;
|
||||
use remote_storage::GenericRemoteStorage;
|
||||
use tokio::task::JoinError;
|
||||
use tracing::*;
|
||||
|
||||
use super::models::{LocalTimelineInfo, RemoteTimelineInfo, TimelineInfo};
|
||||
use super::models::{
|
||||
StatusResponse, TenantConfigRequest, TenantCreateRequest, TenantCreateResponse, TenantInfo,
|
||||
TimelineCreateRequest,
|
||||
LocalTimelineInfo, RemoteTimelineInfo, StatusResponse, TenantConfigRequest,
|
||||
TenantCreateRequest, TenantCreateResponse, TenantInfo, TimelineCreateRequest, TimelineInfo,
|
||||
};
|
||||
use crate::pgdatadir_mapping::LsnForTimestamp;
|
||||
use crate::storage_sync;
|
||||
use crate::storage_sync::index::{RemoteIndex, RemoteTimeline};
|
||||
use crate::tenant::{TenantState, Timeline};
|
||||
use crate::tenant::Timeline;
|
||||
use crate::tenant_config::TenantConfOpt;
|
||||
use crate::{config::PageServerConf, tenant_mgr};
|
||||
use utils::{
|
||||
auth::JwtAuth,
|
||||
http::{
|
||||
endpoint::{self, attach_openapi_ui, auth_middleware, check_permission},
|
||||
endpoint::{self, attach_openapi_ui, auth_middleware, check_permission_with},
|
||||
error::{ApiError, HttpErrorBody},
|
||||
json::{json_request, json_response},
|
||||
request::parse_request_param,
|
||||
RequestExt, RouterBuilder,
|
||||
},
|
||||
id::{TenantId, TenantTimelineId, TimelineId},
|
||||
id::{TenantId, TimelineId},
|
||||
lsn::Lsn,
|
||||
};
|
||||
|
||||
@@ -40,7 +38,6 @@ use crate::CheckpointConfig;
|
||||
struct State {
|
||||
conf: &'static PageServerConf,
|
||||
auth: Option<Arc<JwtAuth>>,
|
||||
remote_index: RemoteIndex,
|
||||
allowlist_routes: Vec<Uri>,
|
||||
remote_storage: Option<GenericRemoteStorage>,
|
||||
}
|
||||
@@ -49,7 +46,6 @@ impl State {
|
||||
fn new(
|
||||
conf: &'static PageServerConf,
|
||||
auth: Option<Arc<JwtAuth>>,
|
||||
remote_index: RemoteIndex,
|
||||
remote_storage: Option<GenericRemoteStorage>,
|
||||
) -> anyhow::Result<Self> {
|
||||
let allowlist_routes = ["/v1/status", "/v1/doc", "/swagger.yml"]
|
||||
@@ -60,7 +56,6 @@ impl State {
|
||||
conf,
|
||||
auth,
|
||||
allowlist_routes,
|
||||
remote_index,
|
||||
remote_storage,
|
||||
})
|
||||
}
|
||||
@@ -79,19 +74,41 @@ fn get_config(request: &Request<Body>) -> &'static PageServerConf {
|
||||
get_state(request).conf
|
||||
}
|
||||
|
||||
fn check_permission(request: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
|
||||
check_permission_with(request, |claims| {
|
||||
crate::auth::check_permission(claims, tenant_id)
|
||||
})
|
||||
}
|
||||
|
||||
// Helper function to construct a TimelineInfo struct for a timeline
|
||||
async fn build_timeline_info(
|
||||
state: &State,
|
||||
fn build_timeline_info(
|
||||
tenant_state: TenantState,
|
||||
timeline: &Arc<Timeline>,
|
||||
include_non_incremental_logical_size: bool,
|
||||
include_non_incremental_physical_size: bool,
|
||||
) -> anyhow::Result<TimelineInfo> {
|
||||
let mut info = build_timeline_info_common(tenant_state, timeline)?;
|
||||
if include_non_incremental_logical_size {
|
||||
info.current_logical_size_non_incremental =
|
||||
Some(timeline.get_current_logical_size_non_incremental(info.last_record_lsn)?);
|
||||
}
|
||||
if include_non_incremental_physical_size {
|
||||
info.current_physical_size_non_incremental =
|
||||
Some(timeline.get_physical_size_non_incremental()?)
|
||||
}
|
||||
Ok(info)
|
||||
}
|
||||
|
||||
fn build_timeline_info_common(
|
||||
tenant_state: TenantState,
|
||||
timeline: &Arc<Timeline>,
|
||||
) -> anyhow::Result<TimelineInfo> {
|
||||
let last_record_lsn = timeline.get_last_record_lsn();
|
||||
let (wal_source_connstr, last_received_msg_lsn, last_received_msg_ts) = {
|
||||
let guard = timeline.last_received_wal.lock().unwrap();
|
||||
if let Some(info) = guard.as_ref() {
|
||||
(
|
||||
Some(info.wal_source_connstr.clone()),
|
||||
Some(format!("{:?}", info.wal_source_connconf)), // Password is hidden, but it's for statistics only.
|
||||
Some(info.last_received_msg_lsn),
|
||||
Some(info.last_received_msg_ts),
|
||||
)
|
||||
@@ -100,22 +117,6 @@ async fn build_timeline_info(
|
||||
}
|
||||
};
|
||||
|
||||
let (remote_consistent_lsn, awaits_download) = if let Some(remote_entry) = state
|
||||
.remote_index
|
||||
.read()
|
||||
.await
|
||||
.timeline_entry(&TenantTimelineId {
|
||||
tenant_id: timeline.tenant_id,
|
||||
timeline_id: timeline.timeline_id,
|
||||
}) {
|
||||
(
|
||||
Some(remote_entry.metadata.disk_consistent_lsn()),
|
||||
remote_entry.awaits_download,
|
||||
)
|
||||
} else {
|
||||
(None, false)
|
||||
};
|
||||
|
||||
let ancestor_timeline_id = timeline.get_ancestor_timeline_id();
|
||||
let ancestor_lsn = match timeline.get_ancestor_lsn() {
|
||||
Lsn(0) => None,
|
||||
@@ -130,6 +131,7 @@ async fn build_timeline_info(
|
||||
};
|
||||
let current_physical_size = Some(timeline.get_physical_size());
|
||||
let state = timeline.current_state();
|
||||
let remote_consistent_lsn = timeline.get_remote_consistent_lsn().unwrap_or(Lsn(0));
|
||||
|
||||
let info = TimelineInfo {
|
||||
tenant_id: timeline.tenant_id,
|
||||
@@ -137,30 +139,25 @@ async fn build_timeline_info(
|
||||
ancestor_timeline_id,
|
||||
ancestor_lsn,
|
||||
disk_consistent_lsn: timeline.get_disk_consistent_lsn(),
|
||||
remote_consistent_lsn,
|
||||
last_record_lsn,
|
||||
prev_record_lsn: Some(timeline.get_prev_record_lsn()),
|
||||
latest_gc_cutoff_lsn: *timeline.get_latest_gc_cutoff_lsn(),
|
||||
current_logical_size,
|
||||
current_physical_size,
|
||||
current_logical_size_non_incremental: if include_non_incremental_logical_size {
|
||||
Some(timeline.get_current_logical_size_non_incremental(last_record_lsn)?)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
current_physical_size_non_incremental: if include_non_incremental_physical_size {
|
||||
Some(timeline.get_physical_size_non_incremental()?)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
current_logical_size_non_incremental: None,
|
||||
current_physical_size_non_incremental: None,
|
||||
wal_source_connstr,
|
||||
last_received_msg_lsn,
|
||||
last_received_msg_ts,
|
||||
pg_version: timeline.pg_version,
|
||||
|
||||
remote_consistent_lsn,
|
||||
awaits_download,
|
||||
state,
|
||||
|
||||
// XXX bring back tracking of downloads per timeline, or, introduce
|
||||
// an 'Attaching' state for the timeline and get rid of this field.
|
||||
awaits_download: tenant_state == TenantState::Attaching,
|
||||
|
||||
// Duplicate some fields in 'local' and 'remote' fields, for backwards-compatility
|
||||
// with the control plane.
|
||||
local: LocalTimelineInfo {
|
||||
@@ -170,7 +167,7 @@ async fn build_timeline_info(
|
||||
current_physical_size,
|
||||
},
|
||||
remote: RemoteTimelineInfo {
|
||||
remote_consistent_lsn,
|
||||
remote_consistent_lsn: Some(remote_consistent_lsn),
|
||||
},
|
||||
};
|
||||
Ok(info)
|
||||
@@ -178,6 +175,7 @@ async fn build_timeline_info(
|
||||
|
||||
// healthcheck handler
|
||||
async fn status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
check_permission(&request, None)?;
|
||||
let config = get_config(&request);
|
||||
json_response(StatusCode::OK, StatusResponse { id: config.id })
|
||||
}
|
||||
@@ -187,34 +185,28 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<
|
||||
let request_data: TimelineCreateRequest = json_request(&mut request).await?;
|
||||
check_permission(&request, Some(tenant_id))?;
|
||||
|
||||
let state = get_state(&request);
|
||||
let new_timeline_id = request_data
|
||||
.new_timeline_id
|
||||
.unwrap_or_else(TimelineId::generate);
|
||||
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::NotFound)?;
|
||||
let new_timeline_info = async {
|
||||
match tenant.create_timeline(
|
||||
request_data.new_timeline_id.map(TimelineId::from),
|
||||
request_data.ancestor_timeline_id.map(TimelineId::from),
|
||||
request_data.ancestor_start_lsn,
|
||||
request_data.pg_version.unwrap_or(crate::DEFAULT_PG_VERSION)
|
||||
).await {
|
||||
Ok(Some(new_timeline)) => {
|
||||
// Created. Construct a TimelineInfo for it.
|
||||
let timeline_info = build_timeline_info(state, &new_timeline, false, false)
|
||||
.await
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
Ok(Some(timeline_info))
|
||||
}
|
||||
Ok(None) => Ok(None), // timeline already exists
|
||||
Err(err) => Err(ApiError::InternalServerError(err)),
|
||||
match tenant.create_timeline(
|
||||
new_timeline_id,
|
||||
request_data.ancestor_timeline_id.map(TimelineId::from),
|
||||
request_data.ancestor_start_lsn,
|
||||
request_data.pg_version.unwrap_or(crate::DEFAULT_PG_VERSION)
|
||||
)
|
||||
.instrument(info_span!("timeline_create", tenant = %tenant_id, new_timeline = ?request_data.new_timeline_id, timeline_id = %new_timeline_id, lsn=?request_data.ancestor_start_lsn, pg_version=?request_data.pg_version))
|
||||
.await {
|
||||
Ok(Some(new_timeline)) => {
|
||||
// Created. Construct a TimelineInfo for it.
|
||||
let timeline_info = build_timeline_info_common(tenant.current_state(), &new_timeline)
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
json_response(StatusCode::CREATED, timeline_info)
|
||||
}
|
||||
Ok(None) => json_response(StatusCode::CONFLICT, ()), // timeline already exists
|
||||
Err(err) => Err(ApiError::InternalServerError(err)),
|
||||
}
|
||||
.instrument(info_span!("timeline_create", tenant = %tenant_id, new_timeline = ?request_data.new_timeline_id, lsn=?request_data.ancestor_start_lsn, pg_version=?request_data.pg_version))
|
||||
.await?;
|
||||
|
||||
Ok(match new_timeline_info {
|
||||
Some(info) => json_response(StatusCode::CREATED, info)?,
|
||||
None => json_response(StatusCode::CONFLICT, ())?,
|
||||
})
|
||||
}
|
||||
|
||||
async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
@@ -225,22 +217,21 @@ async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>,
|
||||
query_param_present(&request, "include-non-incremental-physical-size");
|
||||
check_permission(&request, Some(tenant_id))?;
|
||||
|
||||
let state = get_state(&request);
|
||||
let _entered = info_span!("timeline_list", tenant = %tenant_id).entered();
|
||||
|
||||
let timelines = info_span!("timeline_list", tenant = %tenant_id).in_scope(|| {
|
||||
let (tenant_state, timelines) = {
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::NotFound)?;
|
||||
Ok(tenant.list_timelines())
|
||||
})?;
|
||||
(tenant.current_state(), tenant.list_timelines())
|
||||
};
|
||||
|
||||
let mut response_data = Vec::with_capacity(timelines.len());
|
||||
for timeline in timelines {
|
||||
let timeline_info = build_timeline_info(
|
||||
state,
|
||||
tenant_state,
|
||||
&timeline,
|
||||
include_non_incremental_logical_size,
|
||||
include_non_incremental_physical_size,
|
||||
)
|
||||
.await
|
||||
.context("Failed to convert tenant timeline {timeline_id} into the local one: {e:?}")
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
|
||||
@@ -289,24 +280,25 @@ async fn timeline_detail_handler(request: Request<Body>) -> Result<Response<Body
|
||||
query_param_present(&request, "include-non-incremental-physical-size");
|
||||
check_permission(&request, Some(tenant_id))?;
|
||||
|
||||
let state = get_state(&request);
|
||||
|
||||
let timeline_info = async {
|
||||
let timeline = tokio::task::spawn_blocking(move || {
|
||||
tenant_mgr::get_tenant(tenant_id, true)?.get_timeline(timeline_id, false)
|
||||
let (tenant_state, timeline) = tokio::task::spawn_blocking(move || {
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::NotFound)?;
|
||||
Ok((
|
||||
tenant.current_state(),
|
||||
tenant.get_timeline(timeline_id, false),
|
||||
))
|
||||
})
|
||||
.await
|
||||
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))?;
|
||||
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))??;
|
||||
|
||||
let timeline = timeline.map_err(ApiError::NotFound)?;
|
||||
|
||||
let timeline_info = build_timeline_info(
|
||||
state,
|
||||
tenant_state,
|
||||
&timeline,
|
||||
include_non_incremental_logical_size,
|
||||
include_non_incremental_physical_size,
|
||||
)
|
||||
.await
|
||||
.context("Failed to get local timeline info: {e:#}")
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
|
||||
@@ -351,117 +343,28 @@ async fn tenant_attach_handler(request: Request<Body>) -> Result<Response<Body>,
|
||||
|
||||
info!("Handling tenant attach {tenant_id}");
|
||||
|
||||
tokio::task::spawn_blocking(move || match tenant_mgr::get_tenant(tenant_id, false) {
|
||||
Ok(tenant) => {
|
||||
if tenant.list_timelines().is_empty() {
|
||||
info!("Attaching to tenant {tenant_id} with zero timelines");
|
||||
Ok(())
|
||||
} else {
|
||||
Err(ApiError::Conflict(
|
||||
"Tenant is already present locally".to_owned(),
|
||||
))
|
||||
}
|
||||
}
|
||||
Err(_) => Ok(()),
|
||||
})
|
||||
.await
|
||||
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))??;
|
||||
|
||||
let state = get_state(&request);
|
||||
let remote_index = &state.remote_index;
|
||||
|
||||
let mut index_accessor = remote_index.write().await;
|
||||
if let Some(tenant_entry) = index_accessor.tenant_entry_mut(&tenant_id) {
|
||||
if tenant_entry.has_in_progress_downloads() {
|
||||
return Err(ApiError::Conflict(
|
||||
"Tenant download is already in progress".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
for (timeline_id, remote_timeline) in tenant_entry.iter_mut() {
|
||||
storage_sync::schedule_layer_download(tenant_id, *timeline_id);
|
||||
remote_timeline.awaits_download = true;
|
||||
}
|
||||
return json_response(StatusCode::ACCEPTED, ());
|
||||
}
|
||||
// no tenant in the index, release the lock to make the potentially lengthy download operation
|
||||
drop(index_accessor);
|
||||
|
||||
// download index parts for every tenant timeline
|
||||
let remote_timelines = match gather_tenant_timelines_index_parts(state, tenant_id).await {
|
||||
Ok(Some(remote_timelines)) => remote_timelines,
|
||||
Ok(None) => return Err(ApiError::NotFound(anyhow!("Unknown remote tenant"))),
|
||||
Err(e) => {
|
||||
error!("Failed to retrieve remote tenant data: {:?}", e);
|
||||
return Err(ApiError::NotFound(anyhow!(
|
||||
"Failed to retrieve remote tenant"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
// recheck that download is not in progress because
|
||||
// we've released the lock to avoid holding it during the download
|
||||
let mut index_accessor = remote_index.write().await;
|
||||
let tenant_entry = match index_accessor.tenant_entry_mut(&tenant_id) {
|
||||
Some(tenant_entry) => {
|
||||
if tenant_entry.has_in_progress_downloads() {
|
||||
return Err(ApiError::Conflict(
|
||||
"Tenant download is already in progress".to_string(),
|
||||
));
|
||||
}
|
||||
tenant_entry
|
||||
}
|
||||
None => index_accessor.add_tenant_entry(tenant_id),
|
||||
};
|
||||
|
||||
// populate remote index with the data from index part and create directories on the local filesystem
|
||||
for (timeline_id, mut remote_timeline) in remote_timelines {
|
||||
tokio::fs::create_dir_all(state.conf.timeline_path(&timeline_id, &tenant_id))
|
||||
if let Some(remote_storage) = &state.remote_storage {
|
||||
// FIXME: distinguish between "Tenant already exists" and other errors
|
||||
tenant_mgr::attach_tenant(state.conf, tenant_id, remote_storage)
|
||||
.instrument(info_span!("tenant_attach", tenant = %tenant_id))
|
||||
.await
|
||||
.context("Failed to create new timeline directory")
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
|
||||
remote_timeline.awaits_download = true;
|
||||
tenant_entry.insert(timeline_id, remote_timeline);
|
||||
// schedule actual download
|
||||
storage_sync::schedule_layer_download(tenant_id, timeline_id);
|
||||
} else {
|
||||
return Err(ApiError::BadRequest(anyhow!(
|
||||
"attach_tenant is possible because pageserver was configured without remote storage"
|
||||
)));
|
||||
}
|
||||
|
||||
json_response(StatusCode::ACCEPTED, ())
|
||||
}
|
||||
|
||||
/// Note: this is expensive from an S3 access perspective;
/// for details, see the comment on `storage_sync::gather_tenant_timelines_index_parts`
|
||||
async fn gather_tenant_timelines_index_parts(
|
||||
state: &State,
|
||||
tenant_id: TenantId,
|
||||
) -> anyhow::Result<Option<Vec<(TimelineId, RemoteTimeline)>>> {
|
||||
let index_parts = match state.remote_storage.as_ref() {
|
||||
Some(storage) => {
|
||||
storage_sync::gather_tenant_timelines_index_parts(state.conf, storage, tenant_id).await
|
||||
}
|
||||
None => return Ok(None),
|
||||
}
|
||||
.with_context(|| format!("Failed to download index parts for tenant {tenant_id}"))?;
|
||||
|
||||
let mut remote_timelines = Vec::with_capacity(index_parts.len());
|
||||
for (timeline_id, index_part) in index_parts {
|
||||
let timeline_path = state.conf.timeline_path(&timeline_id, &tenant_id);
|
||||
let remote_timeline = RemoteTimeline::from_index_part(&timeline_path, index_part)
|
||||
.with_context(|| {
|
||||
format!("Failed to convert index part into remote timeline for timeline {tenant_id}/{timeline_id}")
|
||||
})?;
|
||||
remote_timelines.push((timeline_id, remote_timeline));
|
||||
}
|
||||
Ok(Some(remote_timelines))
|
||||
}
|
||||
|
||||
async fn timeline_delete_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
|
||||
let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
|
||||
check_permission(&request, Some(tenant_id))?;
|
||||
|
||||
let state = get_state(&request);
|
||||
tenant_mgr::delete_timeline(tenant_id, timeline_id)
|
||||
.instrument(info_span!("timeline_delete", tenant = %tenant_id, timeline = %timeline_id))
|
||||
.await
|
||||
@@ -470,12 +373,6 @@ async fn timeline_delete_handler(request: Request<Body>) -> Result<Response<Body
|
||||
// it.
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
|
||||
let mut remote_index = state.remote_index.write().await;
|
||||
remote_index.remove_timeline_entry(TenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
});
|
||||
|
||||
json_response(StatusCode::OK, ())
|
||||
}
|
||||
|
||||
@@ -492,22 +389,23 @@ async fn tenant_detach_handler(request: Request<Body>) -> Result<Response<Body>,
|
||||
// Replace this with better handling once the error type permits it.
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
|
||||
let mut remote_index = state.remote_index.write().await;
|
||||
remote_index.remove_tenant_entry(&tenant_id);
|
||||
|
||||
json_response(StatusCode::OK, ())
|
||||
}
|
||||
|
||||
async fn tenant_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
check_permission(&request, None)?;
|
||||
|
||||
let state = get_state(&request);
|
||||
// clone to avoid holding the lock while awaiting for blocking task
|
||||
let remote_index = state.remote_index.read().await.clone();
|
||||
|
||||
let response_data = tokio::task::spawn_blocking(move || {
|
||||
let _enter = info_span!("tenant_list").entered();
|
||||
crate::tenant_mgr::list_tenant_info(&remote_index)
|
||||
tenant_mgr::list_tenants()
|
||||
.iter()
|
||||
.map(|(id, state)| TenantInfo {
|
||||
id: *id,
|
||||
state: *state,
|
||||
current_physical_size: None,
|
||||
has_in_progress_downloads: Some(state.has_in_progress_downloads()),
|
||||
})
|
||||
.collect::<Vec<TenantInfo>>()
|
||||
})
|
||||
.await
|
||||
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))?;
|
||||
@@ -519,58 +417,38 @@ async fn tenant_status(request: Request<Body>) -> Result<Response<Body>, ApiErro
|
||||
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
|
||||
check_permission(&request, Some(tenant_id))?;
|
||||
|
||||
// if tenant is in progress of downloading it can be absent in global tenant map
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, false);
|
||||
let tenant_info = tokio::task::spawn_blocking(move || {
|
||||
let _enter = info_span!("tenant_status_handler", tenant = %tenant_id).entered();
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, false)?;
|
||||
|
||||
let state = get_state(&request);
|
||||
let remote_index = &state.remote_index;
|
||||
|
||||
let index_accessor = remote_index.read().await;
|
||||
let has_in_progress_downloads = index_accessor
|
||||
.tenant_entry(&tenant_id)
|
||||
.map(|t| t.has_in_progress_downloads())
|
||||
.unwrap_or_else(|| {
|
||||
info!("Tenant {tenant_id} not found in remote index");
|
||||
false
|
||||
});
|
||||
|
||||
let (tenant_state, current_physical_size) = match tenant {
|
||||
Ok(tenant) => {
|
||||
let timelines = tenant.list_timelines();
|
||||
// Calculate total physical size of all timelines
|
||||
let mut current_physical_size = 0;
|
||||
for timeline in timelines {
|
||||
current_physical_size += timeline.get_physical_size();
|
||||
}
|
||||
|
||||
(tenant.current_state(), Some(current_physical_size))
|
||||
// Calculate total physical size of all timelines
|
||||
let mut current_physical_size = 0;
|
||||
for timeline in tenant.list_timelines().iter() {
|
||||
current_physical_size += timeline.get_physical_size();
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to get local tenant state: {e:#}");
|
||||
if has_in_progress_downloads {
|
||||
(TenantState::Paused, None)
|
||||
} else {
|
||||
(TenantState::Broken, None)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
json_response(
|
||||
StatusCode::OK,
|
||||
TenantInfo {
|
||||
let state = tenant.current_state();
|
||||
let tenant_info = TenantInfo {
|
||||
id: tenant_id,
|
||||
state: tenant_state,
|
||||
current_physical_size,
|
||||
has_in_progress_downloads: Some(has_in_progress_downloads),
|
||||
},
|
||||
)
|
||||
state,
|
||||
current_physical_size: Some(current_physical_size),
|
||||
has_in_progress_downloads: Some(state.has_in_progress_downloads()),
|
||||
};
|
||||
|
||||
Ok::<_, anyhow::Error>(tenant_info)
|
||||
})
|
||||
.await
|
||||
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))?
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
|
||||
json_response(StatusCode::OK, tenant_info)
|
||||
}
|
||||
|
||||
async fn tenant_size_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
|
||||
check_permission(&request, Some(tenant_id))?;
|
||||
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, false).map_err(ApiError::InternalServerError)?;
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::InternalServerError)?;
|
||||
|
||||
// this can be long operation, it currently is not backed by any request coalescing or similar
|
||||
let inputs = tenant
|
||||
@@ -618,8 +496,6 @@ async fn tenant_create_handler(mut request: Request<Body>) -> Result<Response<Bo
|
||||
check_permission(&request, None)?;
|
||||
|
||||
let request_data: TenantCreateRequest = json_request(&mut request).await?;
|
||||
println!("tenant create: {:?}", request_data.trace_read_requests);
|
||||
let remote_index = get_state(&request).remote_index.clone();
|
||||
|
||||
let mut tenant_conf = TenantConfOpt::default();
|
||||
if let Some(gc_period) = request_data.gc_period {
|
||||
@@ -689,20 +565,42 @@ async fn tenant_create_handler(mut request: Request<Body>) -> Result<Response<Bo
|
||||
.map(TenantId::from)
|
||||
.unwrap_or_else(TenantId::generate);
|
||||
|
||||
let new_tenant_id = tokio::task::spawn_blocking(move || {
|
||||
let new_tenant = tokio::task::spawn_blocking(move || {
|
||||
let _enter = info_span!("tenant_create", tenant = ?target_tenant_id).entered();
|
||||
let conf = get_config(&request);
|
||||
let state = get_state(&request);
|
||||
|
||||
tenant_mgr::create_tenant(conf, tenant_conf, target_tenant_id, remote_index)
|
||||
// FIXME: `create_tenant` can fail from both user and internal errors. Replace this
|
||||
// with better error handling once the type permits it
|
||||
.map_err(ApiError::InternalServerError)
|
||||
tenant_mgr::create_tenant(
|
||||
state.conf,
|
||||
tenant_conf,
|
||||
target_tenant_id,
|
||||
state.remote_storage.clone(),
|
||||
)
|
||||
// FIXME: `create_tenant` can fail from both user and internal errors. Replace this
|
||||
// with better error handling once the type permits it
|
||||
.map_err(ApiError::InternalServerError)
|
||||
})
|
||||
.await
|
||||
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))??;
|
||||
|
||||
Ok(match new_tenant_id {
|
||||
Some(id) => json_response(StatusCode::CREATED, TenantCreateResponse(id))?,
|
||||
Ok(match new_tenant {
|
||||
Some(tenant) => {
|
||||
// We created the tenant. Existing API semantics are that the tenant
|
||||
// is Active when this function returns.
|
||||
if let res @ Err(_) = tenant.wait_to_become_active().await {
|
||||
// This shouldn't happen because we just created the tenant directory
|
||||
// in tenant_mgr::create_tenant, and there aren't any remote timelines
|
||||
// to load, so, nothing can really fail during load.
|
||||
// Don't do cleanup because we don't know how we got here.
|
||||
// The tenant will likely be in `Broken` state and subsequent
|
||||
// calls will fail.
|
||||
res.context("created tenant failed to become active")
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
}
|
||||
json_response(
|
||||
StatusCode::CREATED,
|
||||
TenantCreateResponse(tenant.tenant_id()),
|
||||
)?
|
||||
}
|
||||
None => json_response(StatusCode::CONFLICT, ())?,
|
||||
})
|
||||
}
|
||||
@@ -828,22 +726,16 @@ async fn timeline_gc_handler(mut request: Request<Body>) -> Result<Response<Body
|
||||
let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
|
||||
check_permission(&request, Some(tenant_id))?;
|
||||
|
||||
// FIXME: currently this will return a 500 error on bad tenant id; it should be 4XX
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, false).map_err(ApiError::NotFound)?;
|
||||
let gc_req: TimelineGcRequest = json_request(&mut request).await?;
|
||||
|
||||
let gc_horizon = gc_req.gc_horizon.unwrap_or_else(|| tenant.get_gc_horizon());
|
||||
|
||||
// Use tenant's pitr setting
|
||||
let pitr = tenant.get_pitr_interval();
|
||||
let result = tenant
|
||||
.gc_iteration(Some(timeline_id), gc_horizon, pitr, true)
|
||||
.instrument(info_span!("manual_gc", tenant = %tenant_id, timeline = %timeline_id))
|
||||
let wait_task_done = tenant_mgr::immediate_gc(tenant_id, timeline_id, gc_req)?;
|
||||
let gc_result = wait_task_done
|
||||
.await
|
||||
// FIXME: `gc_iteration` can return an error for multiple reasons; we should handle it
|
||||
// better once the types support it.
|
||||
.context("wait for gc task")
|
||||
.map_err(ApiError::InternalServerError)?
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
json_response(StatusCode::OK, result)
|
||||
|
||||
json_response(StatusCode::OK, gc_result)
|
||||
}
|
||||
|
||||
// Run compaction immediately on given timeline.
|
||||
@@ -857,7 +749,10 @@ async fn timeline_compact_handler(request: Request<Body>) -> Result<Response<Bod
|
||||
let timeline = tenant
|
||||
.get_timeline(timeline_id, true)
|
||||
.map_err(ApiError::NotFound)?;
|
||||
timeline.compact().map_err(ApiError::InternalServerError)?;
|
||||
timeline
|
||||
.compact()
|
||||
.await
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
|
||||
json_response(StatusCode::OK, ())
|
||||
}
|
||||
@@ -891,7 +786,6 @@ async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
pub fn make_router(
|
||||
conf: &'static PageServerConf,
|
||||
auth: Option<Arc<JwtAuth>>,
|
||||
remote_index: RemoteIndex,
|
||||
remote_storage: Option<GenericRemoteStorage>,
|
||||
) -> anyhow::Result<RouterBuilder<hyper::Body, ApiError>> {
|
||||
let spec = include_bytes!("openapi_spec.yml");
|
||||
@@ -928,8 +822,7 @@ pub fn make_router(
|
||||
|
||||
Ok(router
|
||||
.data(Arc::new(
|
||||
State::new(conf, auth, remote_index, remote_storage)
|
||||
.context("Failed to initialize router state")?,
|
||||
State::new(conf, auth, remote_storage).context("Failed to initialize router state")?,
|
||||
))
|
||||
.get("/v1/status", status_handler)
|
||||
.put(
|
||||
|
||||
@@ -1,15 +1,17 @@
|
||||
mod auth;
|
||||
pub mod basebackup;
|
||||
pub mod config;
|
||||
pub mod http;
|
||||
pub mod import_datadir;
|
||||
pub mod keyspace;
|
||||
pub mod metrics;
|
||||
pub(crate) mod metrics;
|
||||
pub mod page_cache;
|
||||
pub mod page_service;
|
||||
pub mod pgdatadir_mapping;
|
||||
pub mod profiling;
|
||||
pub mod repository;
|
||||
pub mod storage_sync;
|
||||
pub mod storage_sync2;
|
||||
pub use storage_sync2 as storage_sync;
|
||||
pub mod task_mgr;
|
||||
pub mod tenant;
|
||||
pub mod tenant_config;
|
||||
@@ -22,10 +24,9 @@ pub mod walreceiver;
|
||||
pub mod walrecord;
|
||||
pub mod walredo;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
use tracing::info;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
|
||||
use crate::task_mgr::TaskKind;
|
||||
|
||||
@@ -71,7 +72,7 @@ pub async fn shutdown_pageserver(exit_code: i32) {
|
||||
//
|
||||
// FIXME: Does this wait for the sync tasks to finish syncing what's queued up?
|
||||
// Should it?
|
||||
task_mgr::shutdown_tasks(Some(TaskKind::StorageSync), None, None).await;
|
||||
task_mgr::shutdown_tasks(Some(TaskKind::RemoteUploadTask), None, None).await;
|
||||
|
||||
// Shut down the HTTP endpoint last, so that you can still check the server's
|
||||
// status while it's shutting down.
|
||||
@@ -106,23 +107,39 @@ fn exponential_backoff_duration_seconds(n: u32, base_increment: f64, max_seconds
|
||||
}
|
||||
}
|
||||
|
||||
/// A newtype to store arbitrary data grouped by tenant and timeline ids.
|
||||
/// One could use [`utils::id::TenantTimelineId`] for grouping, but that would
|
||||
/// not include the cases where a certain tenant has zero timelines.
|
||||
/// This is sometimes important: a tenant could be registered during initial load from FS,
|
||||
/// even if it has no timelines on disk.
|
||||
#[derive(Debug)]
|
||||
pub struct TenantTimelineValues<T>(HashMap<TenantId, HashMap<TimelineId, T>>);
|
||||
/// The name of the metadata file pageserver creates per timeline.
|
||||
/// Full path: `tenants/<tenant_id>/timelines/<timeline_id>/metadata`.
|
||||
pub const METADATA_FILE_NAME: &str = "metadata";
|
||||
|
||||
impl<T> TenantTimelineValues<T> {
|
||||
fn new() -> Self {
|
||||
Self(HashMap::new())
|
||||
/// Per-tenant configuration file.
|
||||
/// Full path: `tenants/<tenant_id>/config`.
|
||||
pub const TENANT_CONFIG_NAME: &str = "config";
|
||||
|
||||
/// A suffix used for various temporary files. Any temporary files found in the
|
||||
/// data directory at pageserver startup can be automatically removed.
|
||||
pub const TEMP_FILE_SUFFIX: &str = "___temp";
|
||||
|
||||
/// A marker file to mark that a timeline directory was not fully initialized.
|
||||
/// If a timeline directory with this marker is encountered at pageserver startup,
|
||||
/// the timeline directory and the marker file are both removed.
|
||||
/// Full path: `tenants/<tenant_id>/timelines/<timeline_id>___uninit`.
|
||||
pub const TIMELINE_UNINIT_MARK_SUFFIX: &str = "___uninit";
|
||||
|
||||
pub fn is_temporary(path: &Path) -> bool {
|
||||
match path.file_name() {
|
||||
Some(name) => name.to_string_lossy().ends_with(TEMP_FILE_SUFFIX),
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// A suffix to be used during file sync from the remote storage,
|
||||
/// to ensure that we do not leave corrupted files that pretend to be layers.
|
||||
const TEMP_FILE_SUFFIX: &str = "___temp";
|
||||
pub fn is_uninit_mark(path: &Path) -> bool {
|
||||
match path.file_name() {
|
||||
Some(name) => name
|
||||
.to_string_lossy()
|
||||
.ends_with(TIMELINE_UNINIT_MARK_SUFFIX),
|
||||
None => false,
|
||||
}
|
||||
}
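// A hedged, illustrative addition (not part of the upstream diff): a small test sketch showing
// how the suffix helpers above classify paths; the tenant/timeline names are made up.
#[cfg(test)]
mod suffix_helpers_example {
    use super::*;
    use std::path::Path;

    #[test]
    fn classifies_suffixes() {
        // a layer being downloaded keeps the temporary suffix until it is renamed into place
        assert!(is_temporary(Path::new("tenants/t/timelines/tl/000_layer___temp")));
        // an interrupted timeline creation leaves an uninit mark behind
        assert!(is_uninit_mark(Path::new("tenants/t/timelines/tl___uninit")));
        // a regular metadata file matches neither helper
        assert!(!is_temporary(Path::new("tenants/t/timelines/tl/metadata")));
        assert!(!is_uninit_mark(Path::new("tenants/t/timelines/tl/metadata")));
    }
}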
#[cfg(test)]
|
||||
mod backoff_defaults_tests {
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
use metrics::core::{AtomicU64, GenericCounter};
|
||||
use metrics::{
|
||||
register_gauge_vec, register_histogram, register_histogram_vec, register_int_counter,
|
||||
register_int_counter_vec, register_int_gauge, register_int_gauge_vec, register_uint_gauge_vec,
|
||||
GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge,
|
||||
UIntGaugeVec,
|
||||
register_histogram, register_histogram_vec, register_int_counter, register_int_counter_vec,
|
||||
register_int_gauge, register_int_gauge_vec, register_uint_gauge_vec, Histogram, HistogramVec,
|
||||
IntCounter, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
|
||||
};
|
||||
use once_cell::sync::Lazy;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
@@ -200,63 +199,59 @@ pub static NUM_ONDISK_LAYERS: Lazy<IntGauge> = Lazy::new(|| {
|
||||
.expect("failed to define a metric")
|
||||
});
|
||||
|
||||
pub static REMAINING_SYNC_ITEMS: Lazy<IntGauge> = Lazy::new(|| {
|
||||
register_int_gauge!(
|
||||
"pageserver_remote_storage_remaining_sync_items",
|
||||
"Number of storage sync items left in the queue"
|
||||
// remote storage metrics
|
||||
|
||||
pub static REMOTE_UPLOAD_QUEUE_UNFINISHED_TASKS: Lazy<IntGaugeVec> = Lazy::new(|| {
|
||||
register_int_gauge_vec!(
|
||||
"pageserver_remote_upload_queue_unfinished_tasks",
|
||||
"Number of tasks in the upload queue that are not finished yet.",
|
||||
&["tenant_id", "timeline_id", "file_kind", "op_kind"],
|
||||
)
|
||||
.expect("failed to register pageserver remote storage remaining sync items int gauge")
|
||||
.expect("failed to define a metric")
|
||||
});
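// A hedged usage sketch (not part of the upstream diff): how a caller could account for one
// queued upload task with the gauge above. The helper name is hypothetical; only the metric
// and its label layout (tenant_id, timeline_id, file_kind, op_kind) come from this file.
#[allow(dead_code)]
pub fn example_track_queued_layer_upload(tenant_id: &TenantId, timeline_id: &TimelineId) {
    REMOTE_UPLOAD_QUEUE_UNFINISHED_TASKS
        .with_label_values(&[
            &tenant_id.to_string(),
            &timeline_id.to_string(),
            "layer",  // RemoteOpFileKind::Layer.as_str()
            "upload", // RemoteOpKind::Upload.as_str()
        ])
        .inc(); // call .dec() with the same labels once the task finishes
}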
pub static IMAGE_SYNC_TIME: Lazy<GaugeVec> = Lazy::new(|| {
|
||||
register_gauge_vec!(
|
||||
"pageserver_remote_storage_image_sync_duration",
|
||||
"Time spent to synchronize (up/download) a whole pageserver image",
|
||||
&["tenant_id", "timeline_id"],
|
||||
)
|
||||
.expect("failed to register per-timeline pageserver image sync time vec")
|
||||
});
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum RemoteOpKind {
|
||||
Upload,
|
||||
Download,
|
||||
Delete,
|
||||
}
|
||||
impl RemoteOpKind {
|
||||
pub fn as_str(&self) -> &str {
|
||||
match self {
|
||||
Self::Upload => "upload",
|
||||
Self::Download => "download",
|
||||
Self::Delete => "delete",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub static IMAGE_SYNC_OPERATION_KINDS: &[&str] = &["upload", "download", "delete"];
|
||||
pub static IMAGE_SYNC_STATUS: &[&str] = &["success", "failure", "abort"];
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum RemoteOpFileKind {
|
||||
Layer,
|
||||
Index,
|
||||
}
|
||||
impl RemoteOpFileKind {
|
||||
pub fn as_str(&self) -> &str {
|
||||
match self {
|
||||
Self::Layer => "layer",
|
||||
Self::Index => "index",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub static IMAGE_SYNC_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
|
||||
register_int_counter_vec!(
|
||||
"pageserver_remote_storage_image_sync_count",
|
||||
"Number of synchronization operations executed for pageserver images. \
|
||||
Grouped by tenant, timeline, operation_kind and status",
|
||||
&["tenant_id", "timeline_id", "operation_kind", "status"]
|
||||
)
|
||||
.expect("failed to register pageserver image sync count vec")
|
||||
});
|
||||
pub static REMOTE_OPERATION_KINDS: &[&str] = &["upload", "download", "delete"];
|
||||
pub static REMOTE_OPERATION_FILE_KINDS: &[&str] = &["layer", "index"];
|
||||
pub static REMOTE_OPERATION_STATUSES: &[&str] = &["success", "failure"];
|
||||
|
||||
pub static IMAGE_SYNC_TIME_HISTOGRAM: Lazy<HistogramVec> = Lazy::new(|| {
|
||||
pub static REMOTE_OPERATION_TIME: Lazy<HistogramVec> = Lazy::new(|| {
|
||||
register_histogram_vec!(
|
||||
"pageserver_remote_storage_image_sync_seconds",
|
||||
"Time took to synchronize (download or upload) a whole pageserver image. \
|
||||
Grouped by operation_kind and status",
|
||||
&["operation_kind", "status"],
|
||||
vec![0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 3.0, 10.0, 20.0]
|
||||
"pageserver_remote_operation_seconds",
|
||||
"Time spent on remote storage operations. \
|
||||
Grouped by tenant, timeline, operation_kind and status",
|
||||
&["tenant_id", "timeline_id", "file_kind", "op_kind", "status"]
|
||||
)
|
||||
.expect("failed to register pageserver image sync time histogram vec")
|
||||
});
|
||||
|
||||
pub static REMOTE_INDEX_UPLOAD: Lazy<IntCounterVec> = Lazy::new(|| {
|
||||
register_int_counter_vec!(
|
||||
"pageserver_remote_storage_remote_index_uploads_total",
|
||||
"Number of remote index uploads",
|
||||
&["tenant_id", "timeline_id"],
|
||||
)
|
||||
.expect("failed to register pageserver remote index upload vec")
|
||||
});
|
||||
|
||||
pub static NO_LAYERS_UPLOAD: Lazy<IntCounterVec> = Lazy::new(|| {
|
||||
register_int_counter_vec!(
|
||||
"pageserver_remote_storage_no_layers_uploads_total",
|
||||
"Number of skipped uploads due to no layers",
|
||||
&["tenant_id", "timeline_id"],
|
||||
)
|
||||
.expect("failed to register pageserver no layers upload vec")
|
||||
.expect("failed to define a metric")
|
||||
});
|
||||
|
||||
pub static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
|
||||
@@ -473,16 +468,90 @@ impl Drop for TimelineMetrics {
|
||||
let _ = SMGR_QUERY_TIME.remove_label_values(&[op, tenant_id, timeline_id]);
|
||||
}
|
||||
|
||||
for op in IMAGE_SYNC_OPERATION_KINDS {
|
||||
for status in IMAGE_SYNC_STATUS {
|
||||
let _ = IMAGE_SYNC_COUNT.remove_label_values(&[tenant_id, timeline_id, op, status]);
|
||||
let _ = REMOTE_UPLOAD_QUEUE_UNFINISHED_TASKS.remove_label_values(&[tenant_id, timeline_id]);
|
||||
for file_kind in REMOTE_OPERATION_FILE_KINDS {
|
||||
for op in REMOTE_OPERATION_KINDS {
|
||||
for status in REMOTE_OPERATION_STATUSES {
|
||||
let _ = REMOTE_OPERATION_TIME.remove_label_values(&[
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
file_kind,
|
||||
op,
|
||||
status,
|
||||
]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let _ = IMAGE_SYNC_TIME.remove_label_values(&[tenant_id, timeline_id]);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn remove_tenant_metrics(tenant_id: &TenantId) {
|
||||
let _ = STORAGE_TIME.remove_label_values(&["gc", &tenant_id.to_string(), "-"]);
|
||||
}
|
||||
|
||||
use futures::Future;
|
||||
use pin_project_lite::pin_project;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Instant;
|
||||
|
||||
/// Wrapper future that measures the time spent by a remote storage operation,
|
||||
/// and records the time and success/failure as a prometheus metric.
|
||||
pub trait MeasureRemoteOp: Sized {
|
||||
fn measure_remote_op(
|
||||
self,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
file_kind: RemoteOpFileKind,
|
||||
op: RemoteOpKind,
|
||||
) -> MeasuredRemoteOp<Self> {
|
||||
let start = Instant::now();
|
||||
MeasuredRemoteOp {
|
||||
inner: self,
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
file_kind,
|
||||
op,
|
||||
start,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Sized> MeasureRemoteOp for T {}
|
||||
|
||||
pin_project! {
|
||||
pub struct MeasuredRemoteOp<F>
|
||||
{
|
||||
#[pin]
|
||||
inner: F,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
file_kind: RemoteOpFileKind,
|
||||
op: RemoteOpKind,
|
||||
start: Instant,
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Future<Output = Result<O, E>>, O, E> Future for MeasuredRemoteOp<F> {
|
||||
type Output = Result<O, E>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.project();
|
||||
let poll_result = this.inner.poll(cx);
|
||||
if let Poll::Ready(ref res) = poll_result {
|
||||
let duration = this.start.elapsed();
|
||||
let status = if res.is_ok() { &"success" } else { &"failure" };
|
||||
REMOTE_OPERATION_TIME
|
||||
.get_metric_with_label_values(&[
|
||||
&this.tenant_id.to_string(),
|
||||
&this.timeline_id.to_string(),
|
||||
this.file_kind.as_str(),
|
||||
this.op.as_str(),
|
||||
status,
|
||||
])
|
||||
.unwrap()
|
||||
.observe(duration.as_secs_f64());
|
||||
}
|
||||
poll_result
|
||||
}
|
||||
}
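// A hedged usage sketch (not part of the upstream diff): wrapping an arbitrary fallible future
// with `measure_remote_op` so its duration and success/failure status are recorded in
// REMOTE_OPERATION_TIME. The inner async block is a stand-in for a real upload/download future.
#[allow(dead_code)]
async fn example_measured_download(
    tenant_id: TenantId,
    timeline_id: TimelineId,
) -> anyhow::Result<()> {
    async { Ok::<_, anyhow::Error>(()) } // placeholder for e.g. a layer download future
        .measure_remote_op(
            tenant_id,
            timeline_id,
            RemoteOpFileKind::Layer,
            RemoteOpKind::Download,
        )
        .await
}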
@@ -25,13 +25,14 @@ use std::net::TcpListener;
|
||||
use std::str;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::pin;
|
||||
use tokio_util::io::StreamReader;
|
||||
use tokio_util::io::SyncIoBridge;
|
||||
use tracing::*;
|
||||
use utils::id::ConnectionId;
|
||||
use utils::{
|
||||
auth::{self, Claims, JwtAuth, Scope},
|
||||
auth::{Claims, JwtAuth, Scope},
|
||||
id::{TenantId, TimelineId},
|
||||
lsn::Lsn,
|
||||
postgres_backend::AuthType,
|
||||
@@ -39,6 +40,7 @@ use utils::{
|
||||
simple_rcu::RcuReadGuard,
|
||||
};
|
||||
|
||||
use crate::auth::check_permission;
|
||||
use crate::basebackup;
|
||||
use crate::config::{PageServerConf, ProfilingConfig};
|
||||
use crate::import_datadir::import_wal_from_tar;
|
||||
@@ -46,7 +48,7 @@ use crate::metrics::{LIVE_CONNECTIONS_COUNT, SMGR_QUERY_TIME};
|
||||
use crate::profiling::profpoint_start;
|
||||
use crate::task_mgr;
|
||||
use crate::task_mgr::TaskKind;
|
||||
use crate::tenant::Timeline;
|
||||
use crate::tenant::{Tenant, Timeline};
|
||||
use crate::tenant_mgr;
|
||||
use crate::trace::Tracer;
|
||||
use crate::CheckpointConfig;
|
||||
@@ -278,7 +280,7 @@ impl PageServerHandler {
|
||||
task_mgr::associate_with(Some(tenant_id), Some(timeline_id));
|
||||
|
||||
// Make request tracer if needed
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
|
||||
let tenant = get_active_tenant_with_timeout(tenant_id).await?;
|
||||
let mut tracer = if tenant.get_trace_read_requests() {
|
||||
let connection_id = ConnectionId::generate();
|
||||
let path = tenant
|
||||
@@ -290,7 +292,7 @@ impl PageServerHandler {
|
||||
};
|
||||
|
||||
// Check that the timeline exists
|
||||
let timeline = get_local_timeline(tenant_id, timeline_id)?;
|
||||
let timeline = tenant.get_timeline(timeline_id, true)?;
|
||||
|
||||
// switch client to COPYBOTH
|
||||
pgb.write_message(&BeMessage::CopyBothResponse)?;
|
||||
@@ -313,6 +315,7 @@ impl PageServerHandler {
|
||||
|
||||
let copy_data_bytes = match msg? {
|
||||
Some(FeMessage::CopyData(bytes)) => bytes,
|
||||
Some(FeMessage::Terminate) => break,
|
||||
Some(m) => {
|
||||
bail!("unexpected message: {m:?} during COPY");
|
||||
}
|
||||
@@ -375,7 +378,7 @@ impl PageServerHandler {
|
||||
task_mgr::associate_with(Some(tenant_id), Some(timeline_id));
|
||||
// Create empty timeline
|
||||
info!("creating new timeline");
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
|
||||
let tenant = get_active_tenant_with_timeout(tenant_id).await?;
|
||||
let timeline = tenant.create_empty_timeline(timeline_id, base_lsn, pg_version)?;
|
||||
|
||||
// TODO mark timeline as not ready until it reaches end_lsn.
|
||||
@@ -430,7 +433,7 @@ impl PageServerHandler {
|
||||
) -> anyhow::Result<()> {
|
||||
task_mgr::associate_with(Some(tenant_id), Some(timeline_id));
|
||||
|
||||
let timeline = get_local_timeline(tenant_id, timeline_id)?;
|
||||
let timeline = get_active_timeline_with_timeout(tenant_id, timeline_id).await?;
|
||||
ensure!(timeline.get_last_record_lsn() == start_lsn);
|
||||
|
||||
// TODO leave clean state on error. For now you can use detach to clean
|
||||
@@ -623,7 +626,7 @@ impl PageServerHandler {
|
||||
full_backup: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
// check that the timeline exists
|
||||
let timeline = get_local_timeline(tenant_id, timeline_id)?;
|
||||
let timeline = get_active_timeline_with_timeout(tenant_id, timeline_id).await?;
|
||||
let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
|
||||
if let Some(lsn) = lsn {
|
||||
// Backup was requested at a particular LSN. Wait for it to arrive.
|
||||
@@ -670,7 +673,7 @@ impl PageServerHandler {
|
||||
.claims
|
||||
.as_ref()
|
||||
.expect("claims presence already checked");
|
||||
auth::check_permission(claims, tenant_id)
|
||||
check_permission(claims, tenant_id)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -765,7 +768,7 @@ impl postgres_backend_async::Handler for PageServerHandler {
|
||||
let timeline_id = TimelineId::from_str(params[1])?;
|
||||
|
||||
self.check_permission(Some(tenant_id))?;
|
||||
let timeline = get_local_timeline(tenant_id, timeline_id)?;
|
||||
let timeline = get_active_timeline_with_timeout(tenant_id, timeline_id).await?;
|
||||
|
||||
let end_of_timeline = timeline.get_last_record_rlsn();
|
||||
|
||||
@@ -888,7 +891,7 @@ impl postgres_backend_async::Handler for PageServerHandler {
|
||||
|
||||
self.check_permission(Some(tenant_id))?;
|
||||
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
|
||||
let tenant = get_active_tenant_with_timeout(tenant_id).await?;
|
||||
pgb.write_message(&BeMessage::RowDescription(&[
|
||||
RowDescriptor::int8_col(b"checkpoint_distance"),
|
||||
RowDescriptor::int8_col(b"checkpoint_timeout"),
|
||||
@@ -932,8 +935,28 @@ impl postgres_backend_async::Handler for PageServerHandler {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_local_timeline(tenant_id: TenantId, timeline_id: TimelineId) -> Result<Arc<Timeline>> {
|
||||
tenant_mgr::get_tenant(tenant_id, true)
|
||||
/// Get active tenant.
|
||||
///
|
||||
/// If the tenant is Loading, waits for it to become Active, for up to 30 s. That
|
||||
/// ensures that queries don't fail immediately after pageserver startup, while
/// all tenants are still loading.
|
||||
async fn get_active_tenant_with_timeout(tenant_id: TenantId) -> Result<Arc<Tenant>> {
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, false)?;
|
||||
match tokio::time::timeout(Duration::from_secs(30), tenant.wait_to_become_active()).await {
|
||||
Ok(wait_result) => wait_result
|
||||
// no .context(), the error message is good enough and some tests depend on it
|
||||
.map(move |()| tenant),
|
||||
Err(_) => anyhow::bail!("Timeout waiting for tenant {tenant_id} to become Active"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Shorthand for getting a reference to a Timeline of an Active tenant.
|
||||
async fn get_active_timeline_with_timeout(
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
) -> Result<Arc<Timeline>> {
|
||||
get_active_tenant_with_timeout(tenant_id)
|
||||
.await
|
||||
.and_then(|tenant| tenant.get_timeline(timeline_id, true))
|
||||
}
|
||||
|
||||
|
||||
@@ -189,7 +189,7 @@ impl Value {
|
||||
///
|
||||
/// Result of performing GC
|
||||
///
|
||||
#[derive(Default, Serialize)]
|
||||
#[derive(Default, Serialize, Debug)]
|
||||
pub struct GcResult {
|
||||
pub layers_total: u64,
|
||||
pub layers_needed_by_cutoff: u64,
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,235 +0,0 @@
|
||||
//! Timeline synchronization logic to delete a bulk of timeline's remote files from the remote storage.
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use futures::stream::{FuturesUnordered, StreamExt};
|
||||
use tracing::{debug, error, info};
|
||||
|
||||
use crate::storage_sync::{SyncQueue, SyncTask};
|
||||
use remote_storage::GenericRemoteStorage;
|
||||
use utils::id::TenantTimelineId;
|
||||
|
||||
use super::{LayersDeletion, SyncData};
|
||||
|
||||
/// Attempts to remove the timeline layers from the remote storage.
|
||||
/// If the task had not adjusted the metadata before, the deletion will fail.
|
||||
pub(super) async fn delete_timeline_layers(
|
||||
storage: &GenericRemoteStorage,
|
||||
sync_queue: &SyncQueue,
|
||||
sync_id: TenantTimelineId,
|
||||
mut delete_data: SyncData<LayersDeletion>,
|
||||
) -> bool {
|
||||
if !delete_data.data.deletion_registered {
|
||||
error!("Cannot delete timeline layers before the deletion metadata is not registered, reenqueueing");
|
||||
delete_data.retries += 1;
|
||||
sync_queue.push(sync_id, SyncTask::Delete(delete_data));
|
||||
return false;
|
||||
}
|
||||
|
||||
if delete_data.data.layers_to_delete.is_empty() {
|
||||
info!("No layers to delete, skipping");
|
||||
return true;
|
||||
}
|
||||
|
||||
let layers_to_delete = delete_data
|
||||
.data
|
||||
.layers_to_delete
|
||||
.drain()
|
||||
.collect::<Vec<_>>();
|
||||
debug!("Layers to delete: {layers_to_delete:?}");
|
||||
info!("Deleting {} timeline layers", layers_to_delete.len());
|
||||
|
||||
let mut delete_tasks = layers_to_delete
|
||||
.into_iter()
|
||||
.map(|local_layer_path| async {
|
||||
match remove_storage_object(storage, &local_layer_path).await {
|
||||
Ok(()) => Ok(local_layer_path),
|
||||
Err(e) => Err((e, local_layer_path)),
|
||||
}
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
|
||||
let mut errored = false;
|
||||
while let Some(deletion_result) = delete_tasks.next().await {
|
||||
match deletion_result {
|
||||
Ok(local_layer_path) => {
|
||||
debug!(
|
||||
"Successfully deleted layer {} for timeline {sync_id}",
|
||||
local_layer_path.display()
|
||||
);
|
||||
delete_data.data.deleted_layers.insert(local_layer_path);
|
||||
}
|
||||
Err((e, local_layer_path)) => {
|
||||
errored = true;
|
||||
error!(
|
||||
"Failed to delete layer {} for timeline {sync_id}: {e:?}",
|
||||
local_layer_path.display()
|
||||
);
|
||||
delete_data.data.layers_to_delete.insert(local_layer_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if errored {
|
||||
debug!("Reenqueuing failed delete task for timeline {sync_id}");
|
||||
delete_data.retries += 1;
|
||||
sync_queue.push(sync_id, SyncTask::Delete(delete_data));
|
||||
} else {
|
||||
info!("Successfully deleted all layers");
|
||||
}
|
||||
errored
|
||||
}
|
||||
|
||||
async fn remove_storage_object(
|
||||
storage: &GenericRemoteStorage,
|
||||
local_layer_path: &Path,
|
||||
) -> anyhow::Result<()> {
|
||||
let storage_path = storage
|
||||
.remote_object_id(local_layer_path)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to get the layer storage path for local path '{}'",
|
||||
local_layer_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
storage.delete(&storage_path).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to delete remote layer from storage at '{:?}'",
|
||||
storage_path
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{collections::HashSet, num::NonZeroUsize};
|
||||
|
||||
use itertools::Itertools;
|
||||
use tempfile::tempdir;
|
||||
use tokio::fs;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
use crate::{
|
||||
storage_sync::test_utils::{create_local_timeline, dummy_metadata},
|
||||
tenant::harness::{TenantHarness, TIMELINE_ID},
|
||||
};
|
||||
use remote_storage::{LocalFs, RemoteStorage};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn delete_timeline_negative() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("delete_timeline_negative")?;
|
||||
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
|
||||
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
|
||||
let storage = GenericRemoteStorage::new(LocalFs::new(
|
||||
tempdir()?.path().to_path_buf(),
|
||||
harness.conf.workdir.clone(),
|
||||
)?);
|
||||
|
||||
let deleted = delete_timeline_layers(
|
||||
&storage,
|
||||
&sync_queue,
|
||||
sync_id,
|
||||
SyncData {
|
||||
retries: 1,
|
||||
data: LayersDeletion {
|
||||
deleted_layers: HashSet::new(),
|
||||
layers_to_delete: HashSet::new(),
|
||||
deletion_registered: false,
|
||||
},
|
||||
},
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
!deleted,
|
||||
"Should not start the deletion for task with delete metadata unregistered"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn delete_timeline() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("delete_timeline")?;
|
||||
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
|
||||
|
||||
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
|
||||
let layer_files = ["a", "b", "c", "d"];
|
||||
let storage = GenericRemoteStorage::new(LocalFs::new(
|
||||
tempdir()?.path().to_path_buf(),
|
||||
harness.conf.workdir.clone(),
|
||||
)?);
|
||||
|
||||
let local_storage = storage.as_local().unwrap();
|
||||
|
||||
let current_retries = 3;
|
||||
let metadata = dummy_metadata(Lsn(0x30));
|
||||
let local_timeline_path = harness.timeline_path(&TIMELINE_ID);
|
||||
let timeline_upload =
|
||||
create_local_timeline(&harness, TIMELINE_ID, &layer_files, metadata.clone()).await?;
|
||||
for (local_path, _metadata) in timeline_upload.layers_to_upload {
|
||||
let remote_path =
|
||||
local_storage.resolve_in_storage(&local_storage.remote_object_id(&local_path)?)?;
|
||||
let remote_parent_dir = remote_path.parent().unwrap();
|
||||
if !remote_parent_dir.exists() {
|
||||
fs::create_dir_all(&remote_parent_dir).await?;
|
||||
}
|
||||
fs::copy(&local_path, &remote_path).await?;
|
||||
}
|
||||
assert_eq!(
|
||||
local_storage
|
||||
.list()
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|remote_path| local_storage.local_path(&remote_path).unwrap())
|
||||
.filter_map(|local_path| { Some(local_path.file_name()?.to_str()?.to_owned()) })
|
||||
.sorted()
|
||||
.collect::<Vec<_>>(),
|
||||
layer_files
|
||||
.iter()
|
||||
.map(|layer_str| layer_str.to_string())
|
||||
.sorted()
|
||||
.collect::<Vec<_>>(),
|
||||
"Expect to have all layer files remotely before deletion"
|
||||
);
|
||||
|
||||
let deleted = delete_timeline_layers(
|
||||
&storage,
|
||||
&sync_queue,
|
||||
sync_id,
|
||||
SyncData {
|
||||
retries: current_retries,
|
||||
data: LayersDeletion {
|
||||
deleted_layers: HashSet::new(),
|
||||
layers_to_delete: HashSet::from([
|
||||
local_timeline_path.join("a"),
|
||||
local_timeline_path.join("c"),
|
||||
local_timeline_path.join("something_different"),
|
||||
]),
|
||||
deletion_registered: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
.await;
|
||||
assert!(deleted, "Should be able to delete timeline files");
|
||||
|
||||
assert_eq!(
|
||||
local_storage
|
||||
.list()
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|remote_path| local_storage.local_path(&remote_path).unwrap())
|
||||
.filter_map(|local_path| { Some(local_path.file_name()?.to_str()?.to_owned()) })
|
||||
.sorted()
|
||||
.collect::<Vec<_>>(),
|
||||
vec!["b".to_string(), "d".to_string()],
|
||||
"Expect to have only non-deleted files remotely"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,695 +0,0 @@
|
||||
//! Timeline synchronization logic to fetch the layer files from remote storage into pageserver's local directory.
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
fmt::Debug,
|
||||
mem,
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use futures::stream::{FuturesUnordered, StreamExt};
|
||||
use remote_storage::{DownloadError, GenericRemoteStorage};
|
||||
use tokio::{
|
||||
fs,
|
||||
io::{self, AsyncWriteExt},
|
||||
};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use crate::{
|
||||
config::PageServerConf,
|
||||
storage_sync::{index::LayerFileMetadata, SyncTask},
|
||||
TEMP_FILE_SUFFIX,
|
||||
};
|
||||
use utils::{
|
||||
crashsafe::path_with_suffix_extension,
|
||||
id::{TenantId, TenantTimelineId, TimelineId},
|
||||
};
|
||||
|
||||
use super::{
|
||||
index::{IndexPart, RemoteTimeline},
|
||||
LayersDownload, SyncData, SyncQueue,
|
||||
};
|
||||
|
||||
// We collect the timelines remotely available for each tenant.
// If we failed to gather all index parts (due to an error), the Poisoned variant is returned.
// When the data is received successfully without errors, the Present variant is used.
|
||||
pub enum TenantIndexParts {
|
||||
Poisoned {
|
||||
present: HashMap<TimelineId, IndexPart>,
|
||||
missing: HashSet<TimelineId>,
|
||||
},
|
||||
Present(HashMap<TimelineId, IndexPart>),
|
||||
}
|
||||
|
||||
impl TenantIndexParts {
|
||||
fn add_poisoned(&mut self, timeline_id: TimelineId) {
|
||||
match self {
|
||||
TenantIndexParts::Poisoned { missing, .. } => {
|
||||
missing.insert(timeline_id);
|
||||
}
|
||||
TenantIndexParts::Present(present) => {
|
||||
*self = TenantIndexParts::Poisoned {
|
||||
present: mem::take(present),
|
||||
missing: HashSet::from([timeline_id]),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for TenantIndexParts {
|
||||
fn default() -> Self {
|
||||
TenantIndexParts::Present(HashMap::default())
|
||||
}
|
||||
}
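// A hedged, illustrative sketch (not part of the upstream diff): starting from the Present
// default, marking any timeline as missing flips the whole tenant entry to Poisoned while the
// parts that were already fetched are kept. All arguments are placeholders.
#[allow(dead_code)]
fn example_poisoning(fetched: TimelineId, failed: TimelineId, part: IndexPart) {
    let mut parts = TenantIndexParts::default();
    if let TenantIndexParts::Present(present) = &mut parts {
        present.insert(fetched, part);
    }
    parts.add_poisoned(failed);
    // the successfully fetched index part is still available under `present`
    assert!(matches!(parts, TenantIndexParts::Poisoned { .. }));
}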
pub async fn download_index_parts(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &GenericRemoteStorage,
|
||||
keys: HashSet<TenantTimelineId>,
|
||||
) -> HashMap<TenantId, TenantIndexParts> {
|
||||
let mut index_parts: HashMap<TenantId, TenantIndexParts> = HashMap::new();
|
||||
|
||||
let mut part_downloads = keys
|
||||
.into_iter()
|
||||
.map(|id| async move { (id, download_index_part(conf, storage, id).await) })
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
|
||||
while let Some((id, part_download_result)) = part_downloads.next().await {
match part_download_result {
|
||||
Ok(index_part) => {
|
||||
debug!("Successfully fetched index part for {id}");
|
||||
match index_parts.entry(id.tenant_id).or_default() {
|
||||
TenantIndexParts::Poisoned { present, .. } => {
|
||||
present.insert(id.timeline_id, index_part);
|
||||
}
|
||||
TenantIndexParts::Present(parts) => {
|
||||
parts.insert(id.timeline_id, index_part);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(download_error) => {
|
||||
match download_error {
|
||||
DownloadError::NotFound => {
|
||||
// that's ok, because it means that we didn't upload something we have locally, for example
|
||||
}
|
||||
e => {
|
||||
let tenant_parts = index_parts.entry(id.tenant_id).or_default();
|
||||
tenant_parts.add_poisoned(id.timeline_id);
|
||||
error!(
|
||||
"Failed to fetch index part for {id}: {e} poisoning tenant index parts"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
index_parts
|
||||
}
|
||||
|
||||
/// Note: The function is rather expensive from an S3 access point of view: it will execute ceil(N/1000) + N requests.
/// At least one request is needed to obtain the list of tenant timelines (more requests if there are more than 1000 timelines),
/// and then it will attempt to download every index file that belongs to these timelines.
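/// (Worked example with made-up numbers: for a tenant with 2500 timelines, listing them takes
/// ceil(2500/1000) = 3 requests, followed by 2500 index part downloads, 2503 requests in total.)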
pub async fn gather_tenant_timelines_index_parts(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &GenericRemoteStorage,
|
||||
tenant_id: TenantId,
|
||||
) -> anyhow::Result<HashMap<TimelineId, IndexPart>> {
|
||||
let tenant_path = conf.timelines_path(&tenant_id);
|
||||
let timeline_sync_ids = get_timeline_sync_ids(storage, &tenant_path, tenant_id)
|
||||
.await
|
||||
.with_context(|| format!("Failed to list timeline sync ids for tenat {tenant_id}"))?;
|
||||
|
||||
match download_index_parts(conf, storage, timeline_sync_ids)
|
||||
.await
|
||||
.remove(&tenant_id)
|
||||
.ok_or_else(|| anyhow::anyhow!("Missing tenant index parts. This is a bug."))?
|
||||
{
|
||||
TenantIndexParts::Poisoned { missing, .. } => {
|
||||
anyhow::bail!("Failed to download index parts for all timelines. Missing {missing:?}")
|
||||
}
|
||||
TenantIndexParts::Present(parts) => Ok(parts),
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieves index data from the remote storage for a given timeline.
|
||||
async fn download_index_part(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &GenericRemoteStorage,
|
||||
sync_id: TenantTimelineId,
|
||||
) -> Result<IndexPart, DownloadError> {
|
||||
let index_part_path = conf
|
||||
.metadata_path(sync_id.timeline_id, sync_id.tenant_id)
|
||||
.with_file_name(IndexPart::FILE_NAME);
|
||||
let mut index_part_download = storage
|
||||
.download_storage_object(None, &index_part_path)
|
||||
.await?;
|
||||
|
||||
let mut index_part_bytes = Vec::new();
|
||||
io::copy(
|
||||
&mut index_part_download.download_stream,
|
||||
&mut index_part_bytes,
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to download an index part into file '{}'",
|
||||
index_part_path.display()
|
||||
)
|
||||
})
|
||||
.map_err(DownloadError::Other)?;
|
||||
|
||||
let index_part: IndexPart = serde_json::from_slice(&index_part_bytes)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to deserialize index part file into file '{}'",
|
||||
index_part_path.display()
|
||||
)
|
||||
})
|
||||
.map_err(DownloadError::Other)?;
|
||||
|
||||
let missing_files = index_part.missing_files();
|
||||
if !missing_files.is_empty() {
|
||||
warn!("Found missing layers in index part for timeline {sync_id}: {missing_files:?}");
|
||||
}
|
||||
|
||||
Ok(index_part)
|
||||
}
|
||||
|
||||
/// Timeline download result, with extra data, needed for downloading.
|
||||
#[derive(Debug)]
|
||||
pub(super) enum DownloadedTimeline {
|
||||
/// Remote timeline data is either absent or corrupt, no download possible.
|
||||
Abort,
|
||||
/// Remote timeline data is found, its latest checkpoint's metadata contents (disk_consistent_lsn) is known.
|
||||
/// Initial download failed due to some error, the download task is rescheduled for another retry.
|
||||
FailedAndRescheduled,
|
||||
/// Remote timeline data is found, its latest checkpoint's metadata contents (disk_consistent_lsn) is known.
|
||||
/// Initial download successful.
|
||||
Successful(SyncData<LayersDownload>),
|
||||
}
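// A hedged, illustrative sketch (not part of the upstream diff): how a caller is expected to
// branch on the download outcome above; the comments in the arms are placeholders for the real
// follow-up actions performed elsewhere in storage_sync.
#[allow(dead_code)]
fn example_handle_download_outcome(outcome: DownloadedTimeline) {
    match outcome {
        // remote data is absent or corrupt: give up on this timeline
        DownloadedTimeline::Abort => {}
        // the task was re-enqueued with a bumped retry count: nothing more to do here
        DownloadedTimeline::FailedAndRescheduled => {}
        // apply the downloaded layers and metadata, then mark the timeline as ready
        DownloadedTimeline::Successful(_download_data) => {}
    }
}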
/// Attempts to download all given timeline's layers.
|
||||
/// Timeline files that already exist locally are skipped during the download, but the local metadata file is
|
||||
/// updated in the end, if the remote one contains a newer disk_consistent_lsn.
|
||||
///
|
||||
/// On an error, bumps the retries count and updates the files to skip with successful downloads, rescheduling the task.
|
||||
pub(super) async fn download_timeline_layers<'a>(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &'a GenericRemoteStorage,
|
||||
sync_queue: &'a SyncQueue,
|
||||
remote_timeline: Option<&'a RemoteTimeline>,
|
||||
sync_id: TenantTimelineId,
|
||||
mut download_data: SyncData<LayersDownload>,
|
||||
) -> DownloadedTimeline {
|
||||
let remote_timeline = match remote_timeline {
|
||||
Some(remote_timeline) => {
|
||||
if !remote_timeline.awaits_download {
|
||||
error!("Timeline with sync id {sync_id} is not awaiting download");
|
||||
return DownloadedTimeline::Abort;
|
||||
}
|
||||
remote_timeline
|
||||
}
|
||||
None => {
|
||||
error!("Timeline with sync id {sync_id} is not present in the remote index");
|
||||
return DownloadedTimeline::Abort;
|
||||
}
|
||||
};
|
||||
|
||||
let download = &mut download_data.data;
|
||||
|
||||
let layers_to_download = remote_timeline
|
||||
.stored_files()
|
||||
.iter()
|
||||
.filter_map(|(layer_path, metadata)| {
|
||||
if !download.layers_to_skip.contains(layer_path) {
|
||||
Some((layer_path.to_owned(), metadata.to_owned()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
debug!("Layers to download: {layers_to_download:?}");
|
||||
info!("Downloading {} timeline layers", layers_to_download.len());
|
||||
|
||||
if layers_to_download.is_empty() {
|
||||
info!("No layers to download after filtering, skipping");
|
||||
return DownloadedTimeline::Successful(download_data);
|
||||
}
|
||||
|
||||
let mut download_tasks = layers_to_download
|
||||
.into_iter()
|
||||
.map(|(layer_destination_path, metadata)| async move {
|
||||
|
||||
match layer_destination_path.metadata() {
|
||||
Ok(m) if m.is_file() => {
|
||||
// the file exists from an earlier round when we failed after renaming it to
// layer_destination_path
|
||||
let verified = if let Some(expected) = metadata.file_size() {
|
||||
m.len() == expected
|
||||
} else {
|
||||
// behaviour before recording metadata was to accept any existing
|
||||
true
|
||||
};
|
||||
|
||||
if verified {
|
||||
debug!(
|
||||
"Layer already exists locally, skipping download: {}",
|
||||
layer_destination_path.display()
|
||||
);
|
||||
return Ok((layer_destination_path, LayerFileMetadata::new(m.len())))
|
||||
} else {
|
||||
// no need to remove it, it will be overwritten by fs::rename
|
||||
// after successful download
|
||||
warn!("Downloaded layer exists already but layer file metadata mismatches: {}, metadata {:?}", layer_destination_path.display(), metadata);
|
||||
}
|
||||
}
|
||||
Ok(m) => {
|
||||
return Err(anyhow::anyhow!("Downloaded layer destination exists but is not a file: {m:?}, target needs to be removed/archived manually: {layer_destination_path:?}"));
|
||||
}
|
||||
Err(_) => {
|
||||
// behave as the file didn't exist
|
||||
}
|
||||
}
|
||||
|
||||
// Perform a rename inspired by durable_rename from file_utils.c.
|
||||
// The sequence:
|
||||
// write(tmp)
|
||||
// fsync(tmp)
|
||||
// rename(tmp, new)
|
||||
// fsync(new)
|
||||
// fsync(parent)
|
||||
// For more context about durable_rename check this email from postgres mailing list:
|
||||
// https://www.postgresql.org/message-id/56583BDD.9060302@2ndquadrant.com
|
||||
// If pageserver crashes the temp file will be deleted on startup and re-downloaded.
|
||||
let temp_file_path =
|
||||
path_with_suffix_extension(&layer_destination_path, TEMP_FILE_SUFFIX);
|
||||
|
||||
// TODO: this doesn't use the cached fd for some reason?
|
||||
let mut destination_file =
|
||||
fs::File::create(&temp_file_path).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to create a destination file for layer '{}'",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
let mut layer_download = storage.download_storage_object(None, &layer_destination_path)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to initiate the download the layer for {sync_id} into file '{}'",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
let bytes_amount = io::copy(&mut layer_download.download_stream, &mut destination_file)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to download the layer for {sync_id} into file '{}'",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
// Tokio doc here: https://docs.rs/tokio/1.17.0/tokio/fs/struct.File.html states that:
|
||||
// A file will not be closed immediately when it goes out of scope if there are any IO operations
|
||||
// that have not yet completed. To ensure that a file is closed immediately when it is dropped,
|
||||
// you should call flush before dropping it.
|
||||
//
|
||||
// From the tokio code I see that it waits for pending operations to complete. There shouldn't be any, because
// we assume that `destination_file` is fully written, i.e. there are no pending .write(...).await operations.
|
||||
// But for additional safety let's check/wait for any pending operations.
|
||||
destination_file.flush().await.with_context(|| {
|
||||
format!(
|
||||
"failed to flush source file at {}",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
match metadata.file_size() {
|
||||
Some(expected) if expected != bytes_amount => {
|
||||
anyhow::bail!(
|
||||
"According to layer file metadata should had downloaded {expected} bytes but downloaded {bytes_amount} bytes into file '{}'",
|
||||
temp_file_path.display()
|
||||
);
|
||||
},
|
||||
Some(_) | None => {
|
||||
// matches, or upgrading from an earlier IndexPart version
|
||||
}
|
||||
}
|
||||
|
||||
// not using sync_data because it can lose file size update
|
||||
destination_file.sync_all().await.with_context(|| {
|
||||
format!(
|
||||
"failed to fsync source file at {}",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})?;
|
||||
drop(destination_file);
|
||||
|
||||
fail::fail_point!("remote-storage-download-pre-rename", |_| {
|
||||
anyhow::bail!("remote-storage-download-pre-rename failpoint triggered")
|
||||
});
|
||||
|
||||
fs::rename(&temp_file_path, &layer_destination_path).await?;
|
||||
|
||||
fsync_path(&layer_destination_path).await.with_context(|| {
|
||||
format!(
|
||||
"Cannot fsync layer destination path {}",
|
||||
layer_destination_path.display(),
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok::<_, anyhow::Error>((layer_destination_path, LayerFileMetadata::new(bytes_amount)))
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
|
||||
let mut errors_happened = false;
|
||||
// keep files we've downloaded to remove them from layers_to_skip if directory fsync fails
|
||||
let mut undo = HashSet::new();
|
||||
while let Some(download_result) = download_tasks.next().await {
|
||||
match download_result {
|
||||
Ok((downloaded_path, metadata)) => {
|
||||
undo.insert(downloaded_path.clone());
|
||||
download.layers_to_skip.insert(downloaded_path.clone());
|
||||
// what if the key existed already? ignore, because then we would have
// downloaded a partial file and had to retry
|
||||
download.gathered_metadata.insert(downloaded_path, metadata);
|
||||
}
|
||||
Err(e) => {
|
||||
errors_happened = true;
|
||||
error!("Failed to download a layer for timeline {sync_id}: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fsync timeline directory which is a parent directory for downloaded files
|
||||
let TenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
} = &sync_id;
|
||||
let timeline_dir = conf.timeline_path(timeline_id, tenant_id);
|
||||
if let Err(e) = fsync_path(&timeline_dir).await {
|
||||
error!(
|
||||
"Cannot fsync parent directory {} error {}",
|
||||
timeline_dir.display(),
|
||||
e
|
||||
);
|
||||
for item in undo {
|
||||
download.layers_to_skip.remove(&item);
|
||||
// intentionally don't clear the gathered_metadata because it exists for fsync_path
|
||||
// failure on parent directory
|
||||
}
|
||||
errors_happened = true;
|
||||
}
|
||||
|
||||
if errors_happened {
|
||||
debug!("Reenqueuing failed download task for timeline {sync_id}");
|
||||
download_data.retries += 1;
|
||||
sync_queue.push(sync_id, SyncTask::Download(download_data));
|
||||
DownloadedTimeline::FailedAndRescheduled
|
||||
} else {
|
||||
info!("Successfully downloaded all layers");
|
||||
DownloadedTimeline::Successful(download_data)
|
||||
}
|
||||
}
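// A hedged, standalone sketch (not part of the upstream diff) of the durable_rename sequence
// described in the comment inside `download_timeline_layers` above: write(tmp), fsync(tmp),
// rename(tmp, new), fsync(new), fsync(parent). The helper name and signature are made up.
#[allow(dead_code)]
async fn example_durable_rename(temp_path: &Path, final_path: &Path) -> anyhow::Result<()> {
    // the caller is assumed to have already written and flushed the data to `temp_path`
    fsync_path(temp_path).await?; // fsync(tmp)
    fs::rename(temp_path, final_path).await?; // rename(tmp, new)
    fsync_path(final_path).await?; // fsync(new)
    if let Some(parent) = final_path.parent() {
        fsync_path(parent).await?; // fsync(parent), so the rename itself survives a crash
    }
    Ok(())
}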
async fn get_timeline_sync_ids(
|
||||
storage: &GenericRemoteStorage,
|
||||
tenant_path: &Path,
|
||||
tenant_id: TenantId,
|
||||
) -> anyhow::Result<HashSet<TenantTimelineId>> {
|
||||
let tenant_storage_path = storage.remote_object_id(tenant_path).with_context(|| {
|
||||
format!(
|
||||
"Failed to get tenant storage path for local path '{}'",
|
||||
tenant_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
let timelines = storage
|
||||
.list_prefixes(Some(&tenant_storage_path))
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to list tenant storage path {tenant_storage_path:?} to get remote timelines to download"
|
||||
)
|
||||
})?;
|
||||
|
||||
if timelines.is_empty() {
|
||||
anyhow::bail!("no timelines found on the remote storage")
|
||||
}
|
||||
|
||||
let mut sync_ids = HashSet::new();
|
||||
|
||||
for timeline_remote_storage_key in timelines {
|
||||
let object_name = timeline_remote_storage_key.object_name().ok_or_else(|| {
|
||||
anyhow::anyhow!("failed to get timeline id for remote tenant {tenant_id}")
|
||||
})?;
|
||||
|
||||
let timeline_id: TimelineId = object_name.parse().with_context(|| {
|
||||
format!("failed to parse object name into timeline id '{object_name}'")
|
||||
})?;
|
||||
|
||||
sync_ids.insert(TenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(sync_ids)
|
||||
}
|
||||
|
||||
async fn fsync_path(path: impl AsRef<Path>) -> Result<(), io::Error> {
|
||||
fs::File::open(path).await?.sync_all().await
|
||||
}
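// Illustrative sketch (not part of the original code): making a freshly
// downloaded file durable requires fsyncing both the file itself and its
// parent directory; the directory fsync persists the new directory entry.
// The file name and contents below are hypothetical.
#[cfg(test)]
#[tokio::test]
async fn fsync_file_and_parent_sketch() -> anyhow::Result<()> {
    let dir = tempfile::tempdir()?;
    let file_path = dir.path().join("downloaded_layer");
    fs::write(&file_path, b"layer bytes").await?;
    fsync_path(&file_path).await?; // persist the file contents
    fsync_path(dir.path()).await?; // persist the directory entry
    Ok(())
}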
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{
|
||||
collections::{BTreeSet, HashSet},
|
||||
num::NonZeroUsize,
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use remote_storage::{LocalFs, RemoteStorage};
|
||||
use tempfile::tempdir;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
use crate::{
|
||||
storage_sync::{
|
||||
index::RelativePath,
|
||||
test_utils::{create_local_timeline, dummy_metadata},
|
||||
},
|
||||
tenant::harness::{TenantHarness, TIMELINE_ID},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn download_timeline() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("download_timeline")?;
|
||||
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
|
||||
|
||||
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
|
||||
let layer_files = ["a", "b", "layer_to_skip", "layer_to_keep_locally"];
|
||||
let storage = GenericRemoteStorage::new(LocalFs::new(
|
||||
tempdir()?.path().to_owned(),
|
||||
harness.conf.workdir.clone(),
|
||||
)?);
|
||||
let local_storage = storage.as_local().unwrap();
|
||||
let current_retries = 3;
|
||||
let metadata = dummy_metadata(Lsn(0x30));
|
||||
let local_timeline_path = harness.timeline_path(&TIMELINE_ID);
|
||||
let timeline_upload =
|
||||
create_local_timeline(&harness, TIMELINE_ID, &layer_files, metadata.clone()).await?;
|
||||
|
||||
for local_path in timeline_upload.layers_to_upload.keys() {
|
||||
let remote_path =
|
||||
local_storage.resolve_in_storage(&storage.remote_object_id(local_path)?)?;
|
||||
let remote_parent_dir = remote_path.parent().unwrap();
|
||||
if !remote_parent_dir.exists() {
|
||||
fs::create_dir_all(&remote_parent_dir).await?;
|
||||
}
|
||||
fs::copy(&local_path, &remote_path).await?;
|
||||
}
|
||||
let mut read_dir = fs::read_dir(&local_timeline_path).await?;
|
||||
while let Some(dir_entry) = read_dir.next_entry().await? {
|
||||
if dir_entry.file_name().to_str() == Some("layer_to_keep_locally") {
|
||||
continue;
|
||||
} else {
|
||||
fs::remove_file(dir_entry.path()).await?;
|
||||
}
|
||||
}
|
||||
|
||||
let mut remote_timeline = RemoteTimeline::new(metadata.clone());
|
||||
remote_timeline.awaits_download = true;
|
||||
remote_timeline.add_timeline_layers(layer_files.iter().map(|layer| {
|
||||
let layer_path = local_timeline_path.join(layer);
|
||||
|
||||
// this could also have been LayerFileMetadata::default(), but since in this test we
|
||||
// don't do the merge operation done by storage_sync::download_timeline_data, it would
|
||||
// not be merged back to timeline.
|
||||
let metadata_from_upload = timeline_upload
|
||||
.layers_to_upload
|
||||
.get(&layer_path)
|
||||
.expect("layer must exist in previously uploaded paths")
|
||||
.to_owned();
|
||||
(layer_path, metadata_from_upload)
|
||||
}));
|
||||
|
||||
let download_data = match download_timeline_layers(
|
||||
harness.conf,
|
||||
&storage,
|
||||
&sync_queue,
|
||||
Some(&remote_timeline),
|
||||
sync_id,
|
||||
SyncData::new(
|
||||
current_retries,
|
||||
LayersDownload::from_skipped_layers(HashSet::from([
|
||||
local_timeline_path.join("layer_to_skip")
|
||||
])),
|
||||
),
|
||||
)
|
||||
.await
|
||||
{
|
||||
DownloadedTimeline::Successful(data) => data,
|
||||
wrong_result => {
|
||||
panic!("Expected a successful download for timeline, but got: {wrong_result:?}")
|
||||
}
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
current_retries, download_data.retries,
|
||||
"On successful download, retries are not expected to change"
|
||||
);
|
||||
assert_eq!(
|
||||
download_data
|
||||
.data
|
||||
.layers_to_skip
|
||||
.into_iter()
|
||||
.collect::<BTreeSet<_>>(),
|
||||
layer_files
|
||||
.iter()
|
||||
.map(|layer| local_timeline_path.join(layer))
|
||||
.collect(),
|
||||
"On successful download, layers to skip should contain all downloaded files and present layers that were skipped"
|
||||
);
|
||||
|
||||
let mut downloaded_files = BTreeSet::new();
|
||||
let mut read_dir = fs::read_dir(&local_timeline_path).await?;
|
||||
while let Some(dir_entry) = read_dir.next_entry().await? {
|
||||
downloaded_files.insert(dir_entry.path());
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
downloaded_files,
|
||||
layer_files
|
||||
.iter()
|
||||
.filter(|layer| layer != &&"layer_to_skip")
|
||||
.map(|layer| local_timeline_path.join(layer))
|
||||
.collect(),
|
||||
"On successful download, all layers that were not skipped, should be downloaded"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn download_timeline_negatives() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("download_timeline_negatives")?;
|
||||
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
|
||||
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
|
||||
let storage = GenericRemoteStorage::new(LocalFs::new(
|
||||
tempdir()?.path().to_owned(),
|
||||
harness.conf.workdir.clone(),
|
||||
)?);
|
||||
|
||||
let empty_remote_timeline_download = download_timeline_layers(
|
||||
harness.conf,
|
||||
&storage,
|
||||
&sync_queue,
|
||||
None,
|
||||
sync_id,
|
||||
SyncData::new(0, LayersDownload::from_skipped_layers(HashSet::new())),
|
||||
)
|
||||
.await;
|
||||
assert!(
|
||||
matches!(empty_remote_timeline_download, DownloadedTimeline::Abort),
|
||||
"Should not allow downloading for empty remote timeline"
|
||||
);
|
||||
|
||||
let not_expecting_download_remote_timeline = RemoteTimeline::new(dummy_metadata(Lsn(5)));
|
||||
assert!(
|
||||
!not_expecting_download_remote_timeline.awaits_download,
|
||||
"Should not expect download for the timeline"
|
||||
);
|
||||
let already_downloading_remote_timeline_download = download_timeline_layers(
|
||||
harness.conf,
|
||||
&storage,
|
||||
&sync_queue,
|
||||
Some(¬_expecting_download_remote_timeline),
|
||||
sync_id,
|
||||
SyncData::new(0, LayersDownload::from_skipped_layers(HashSet::new())),
|
||||
)
|
||||
.await;
|
||||
assert!(
|
||||
matches!(
|
||||
already_downloading_remote_timeline_download,
|
||||
DownloadedTimeline::Abort,
|
||||
),
|
||||
"Should not allow downloading for remote timeline that does not expect it"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_download_index_part() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("test_download_index_part")?;
|
||||
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
|
||||
|
||||
let storage = GenericRemoteStorage::new(LocalFs::new(
|
||||
tempdir()?.path().to_owned(),
|
||||
harness.conf.workdir.clone(),
|
||||
)?);
|
||||
let local_storage = storage.as_local().unwrap();
|
||||
let metadata = dummy_metadata(Lsn(0x30));
|
||||
let local_timeline_path = harness.timeline_path(&TIMELINE_ID);
|
||||
|
||||
let index_part = IndexPart::new(
|
||||
HashSet::from([
|
||||
RelativePath::new(&local_timeline_path, local_timeline_path.join("one"))?,
|
||||
RelativePath::new(&local_timeline_path, local_timeline_path.join("two"))?,
|
||||
]),
|
||||
HashSet::from([RelativePath::new(
|
||||
&local_timeline_path,
|
||||
local_timeline_path.join("three"),
|
||||
)?]),
|
||||
metadata.disk_consistent_lsn(),
|
||||
metadata.to_bytes()?,
|
||||
);
|
||||
|
||||
let local_index_part_path = harness
|
||||
.conf
|
||||
.metadata_path(sync_id.timeline_id, sync_id.tenant_id)
|
||||
.with_file_name(IndexPart::FILE_NAME);
|
||||
let index_part_remote_id = local_storage.remote_object_id(&local_index_part_path)?;
|
||||
let index_part_local_path = PathBuf::from(index_part_remote_id.to_string());
|
||||
fs::create_dir_all(index_part_local_path.parent().unwrap()).await?;
|
||||
fs::write(&index_part_local_path, serde_json::to_vec(&index_part)?).await?;
|
||||
|
||||
let downloaded_index_part = download_index_part(harness.conf, &storage, sync_id).await?;
|
||||
|
||||
assert_eq!(
|
||||
downloaded_index_part, index_part,
|
||||
"Downloaded index part should be the same as the one in storage"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,712 +0,0 @@
|
||||
//! In-memory index to track the tenant files on the remote storage.
|
||||
//! Able to restore itself from the storage index parts, which are located in every timeline's remote directory and contain all data about
//! the remote timeline layers and their metadata.
|
||||
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use anyhow::{anyhow, Context, Ok};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::{serde_as, DisplayFromStr};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::log::warn;
|
||||
|
||||
use crate::{config::PageServerConf, tenant::metadata::TimelineMetadata};
|
||||
use utils::{
|
||||
id::{TenantId, TenantTimelineId, TimelineId},
|
||||
lsn::Lsn,
|
||||
};
|
||||
|
||||
use super::download::TenantIndexParts;
|
||||
|
||||
/// A part of the filesystem path, that needs a root to become a path again.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
#[serde(transparent)]
|
||||
pub struct RelativePath(String);
|
||||
|
||||
impl RelativePath {
|
||||
/// Attempts to strip off the base from path, producing a relative path or an error.
|
||||
pub fn new<P: AsRef<Path>>(base: &Path, path: P) -> anyhow::Result<Self> {
|
||||
let path = path.as_ref();
|
||||
let relative = path.strip_prefix(base).with_context(|| {
|
||||
format!(
|
||||
"path '{}' is not relative to base '{}'",
|
||||
path.display(),
|
||||
base.display()
|
||||
)
|
||||
})?;
|
||||
Ok(RelativePath(relative.to_string_lossy().to_string()))
|
||||
}
|
||||
|
||||
/// Joins the relative path with the base path.
|
||||
fn as_path(&self, base: &Path) -> PathBuf {
|
||||
base.join(&self.0)
|
||||
}
|
||||
}
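// Illustrative sketch (not part of the original code): `RelativePath` strips the
// timeline directory prefix off an absolute layer path and joins it back later.
// The directory and file names below are hypothetical.
#[cfg(test)]
#[test]
fn relative_path_round_trip_sketch() -> anyhow::Result<()> {
    let base = Path::new("/some/timeline/dir");
    let absolute = base.join("layer_file");
    let relative = RelativePath::new(base, &absolute)?;
    assert_eq!(relative.as_path(base), absolute);
    Ok(())
}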
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct TenantEntry(HashMap<TimelineId, RemoteTimeline>);
|
||||
|
||||
impl TenantEntry {
|
||||
pub fn has_in_progress_downloads(&self) -> bool {
|
||||
self.values()
|
||||
.any(|remote_timeline| remote_timeline.awaits_download)
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for TenantEntry {
|
||||
type Target = HashMap<TimelineId, RemoteTimeline>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for TenantEntry {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl From<HashMap<TimelineId, RemoteTimeline>> for TenantEntry {
|
||||
fn from(inner: HashMap<TimelineId, RemoteTimeline>) -> Self {
|
||||
Self(inner)
|
||||
}
|
||||
}
|
||||
|
||||
/// An index to track tenant files that exist on the remote storage.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct RemoteTimelineIndex {
|
||||
entries: HashMap<TenantId, TenantEntry>,
|
||||
}
|
||||
|
||||
/// A wrapper to synchronize the access to the index, should be created and used before dealing with any [`RemoteTimelineIndex`].
|
||||
#[derive(Default)]
|
||||
pub struct RemoteIndex(Arc<RwLock<RemoteTimelineIndex>>);
|
||||
|
||||
impl RemoteIndex {
|
||||
pub fn from_parts(
|
||||
conf: &'static PageServerConf,
|
||||
index_parts: HashMap<TenantId, TenantIndexParts>,
|
||||
) -> anyhow::Result<Self> {
|
||||
let mut entries: HashMap<TenantId, TenantEntry> = HashMap::new();
|
||||
|
||||
for (tenant_id, index_parts) in index_parts {
|
||||
match index_parts {
|
||||
// TODO: should we schedule a retry so it can be recovered? otherwise we can revive it only through detach/attach or pageserver restart
|
||||
TenantIndexParts::Poisoned { missing, .. } => warn!("skipping tenant_id setup for the remote index because the index download has failed for timeline(s): {missing:?}"),
|
||||
TenantIndexParts::Present(timelines) => {
|
||||
for (timeline_id, index_part) in timelines {
|
||||
let timeline_path = conf.timeline_path(&timeline_id, &tenant_id);
|
||||
let remote_timeline =
|
||||
RemoteTimeline::from_index_part(&timeline_path, index_part)
|
||||
.context("Failed to restore remote timeline data from index part")?;
|
||||
|
||||
entries
|
||||
.entry(tenant_id)
|
||||
.or_default()
|
||||
.insert(timeline_id, remote_timeline);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Self(Arc::new(RwLock::new(RemoteTimelineIndex { entries }))))
|
||||
}
|
||||
|
||||
pub async fn read(&self) -> tokio::sync::RwLockReadGuard<'_, RemoteTimelineIndex> {
|
||||
self.0.read().await
|
||||
}
|
||||
|
||||
pub async fn write(&self) -> tokio::sync::RwLockWriteGuard<'_, RemoteTimelineIndex> {
|
||||
self.0.write().await
|
||||
}
|
||||
}
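// Illustrative sketch (not part of the original code): every index access goes
// through the async RwLock wrapper, and flipping the download flag for a timeline
// the index does not know about is reported as an error. The hex ids below are
// arbitrary placeholders.
#[cfg(test)]
#[tokio::test]
async fn set_awaits_download_unknown_timeline_sketch() {
    let index = RemoteIndex::default();
    let unknown_id = TenantTimelineId::new(
        "11223344556677881122334455667788"
            .parse()
            .expect("valid tenant id"),
        "aabbccddeeff00112233445566778899"
            .parse()
            .expect("valid timeline id"),
    );
    let mut index_accessor = index.write().await;
    assert!(
        index_accessor.set_awaits_download(&unknown_id, true).is_err(),
        "the flag cannot be set for a timeline the index does not know about"
    );
}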
|
||||
|
||||
impl Clone for RemoteIndex {
|
||||
fn clone(&self) -> Self {
|
||||
Self(Arc::clone(&self.0))
|
||||
}
|
||||
}
|
||||
|
||||
impl RemoteTimelineIndex {
|
||||
pub fn timeline_entry(
|
||||
&self,
|
||||
TenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
}: &TenantTimelineId,
|
||||
) -> Option<&RemoteTimeline> {
|
||||
self.entries.get(tenant_id)?.get(timeline_id)
|
||||
}
|
||||
|
||||
pub fn timeline_entry_mut(
|
||||
&mut self,
|
||||
TenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
}: &TenantTimelineId,
|
||||
) -> Option<&mut RemoteTimeline> {
|
||||
self.entries.get_mut(tenant_id)?.get_mut(timeline_id)
|
||||
}
|
||||
|
||||
pub fn add_timeline_entry(
|
||||
&mut self,
|
||||
TenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
}: TenantTimelineId,
|
||||
entry: RemoteTimeline,
|
||||
) {
|
||||
self.entries
|
||||
.entry(tenant_id)
|
||||
.or_default()
|
||||
.insert(timeline_id, entry);
|
||||
}
|
||||
|
||||
pub fn remove_timeline_entry(
|
||||
&mut self,
|
||||
TenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
}: TenantTimelineId,
|
||||
) -> Option<RemoteTimeline> {
|
||||
self.entries
|
||||
.entry(tenant_id)
|
||||
.or_default()
|
||||
.remove(&timeline_id)
|
||||
}
|
||||
|
||||
pub fn tenant_entry(&self, tenant_id: &TenantId) -> Option<&TenantEntry> {
|
||||
self.entries.get(tenant_id)
|
||||
}
|
||||
|
||||
pub fn tenant_entry_mut(&mut self, tenant_id: &TenantId) -> Option<&mut TenantEntry> {
|
||||
self.entries.get_mut(tenant_id)
|
||||
}
|
||||
|
||||
pub fn add_tenant_entry(&mut self, tenant_id: TenantId) -> &mut TenantEntry {
|
||||
self.entries.entry(tenant_id).or_default()
|
||||
}
|
||||
|
||||
pub fn remove_tenant_entry(&mut self, tenant_id: &TenantId) -> Option<TenantEntry> {
|
||||
self.entries.remove(tenant_id)
|
||||
}
|
||||
|
||||
pub fn set_awaits_download(
|
||||
&mut self,
|
||||
id: &TenantTimelineId,
|
||||
awaits_download: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
self.timeline_entry_mut(id)
|
||||
.ok_or_else(|| anyhow!("unknown timeline sync {id}"))?
|
||||
.awaits_download = awaits_download;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Restored index part data about the timeline, stored in the remote index.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RemoteTimeline {
|
||||
timeline_layers: HashMap<PathBuf, LayerFileMetadata>,
|
||||
missing_layers: HashMap<PathBuf, LayerFileMetadata>,
|
||||
|
||||
pub metadata: TimelineMetadata,
|
||||
pub awaits_download: bool,
|
||||
}
|
||||
|
||||
impl RemoteTimeline {
|
||||
pub fn new(metadata: TimelineMetadata) -> Self {
|
||||
Self {
|
||||
timeline_layers: HashMap::default(),
|
||||
missing_layers: HashMap::default(),
|
||||
metadata,
|
||||
awaits_download: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_timeline_layers(
|
||||
&mut self,
|
||||
new_layers: impl IntoIterator<Item = (PathBuf, LayerFileMetadata)>,
|
||||
) {
|
||||
self.timeline_layers.extend(new_layers);
|
||||
}
|
||||
|
||||
pub fn add_upload_failures(
|
||||
&mut self,
|
||||
upload_failures: impl IntoIterator<Item = (PathBuf, LayerFileMetadata)>,
|
||||
) {
|
||||
self.missing_layers.extend(upload_failures);
|
||||
}
|
||||
|
||||
pub fn remove_layers(&mut self, layers_to_remove: &HashSet<PathBuf>) {
|
||||
self.timeline_layers
|
||||
.retain(|layer, _| !layers_to_remove.contains(layer));
|
||||
self.missing_layers
|
||||
.retain(|layer, _| !layers_to_remove.contains(layer));
|
||||
}
|
||||
|
||||
/// Lists all layer files in the given remote timeline. Omits the metadata file.
|
||||
pub fn stored_files(&self) -> &HashMap<PathBuf, LayerFileMetadata> {
|
||||
&self.timeline_layers
|
||||
}
|
||||
|
||||
/// Combines metadata gathered or verified while downloading the needed layer files into the metadata on
/// the [`RemoteIndex`], so it can be uploaded later.
|
||||
pub fn merge_metadata_from_downloaded(
|
||||
&mut self,
|
||||
downloaded: &HashMap<PathBuf, LayerFileMetadata>,
|
||||
) {
|
||||
downloaded.iter().for_each(|(path, metadata)| {
|
||||
if let Some(upgraded) = self.timeline_layers.get_mut(path) {
|
||||
upgraded.merge(metadata);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
pub fn from_index_part(timeline_path: &Path, index_part: IndexPart) -> anyhow::Result<Self> {
|
||||
let metadata = TimelineMetadata::from_bytes(&index_part.metadata_bytes)?;
|
||||
let default_metadata = &IndexLayerMetadata::default();
|
||||
|
||||
let find_metadata = |key: &RelativePath| -> LayerFileMetadata {
|
||||
index_part
|
||||
.layer_metadata
|
||||
.get(key)
|
||||
.unwrap_or(default_metadata)
|
||||
.into()
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
timeline_layers: index_part
|
||||
.timeline_layers
|
||||
.iter()
|
||||
.map(|layer_path| (layer_path.as_path(timeline_path), find_metadata(layer_path)))
|
||||
.collect(),
|
||||
missing_layers: index_part
|
||||
.missing_layers
|
||||
.iter()
|
||||
.map(|layer_path| (layer_path.as_path(timeline_path), find_metadata(layer_path)))
|
||||
.collect(),
|
||||
metadata,
|
||||
awaits_download: false,
|
||||
})
|
||||
}
|
||||
}
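// Illustrative sketch (not part of the original code): layers that failed to upload
// are tracked separately from the ones stored remotely, and `remove_layers` drops a
// path from both maps at once. The layer names below are hypothetical placeholders.
#[cfg(test)]
#[test]
fn remote_timeline_layer_bookkeeping_sketch() {
    let metadata = TimelineMetadata::new(
        Lsn(5).align(),
        Some(Lsn(4)),
        None,
        Lsn(3),
        Lsn(2),
        Lsn(1),
        crate::DEFAULT_PG_VERSION,
    );
    let mut remote_timeline = RemoteTimeline::new(metadata);

    remote_timeline.add_timeline_layers([(PathBuf::from("uploaded"), LayerFileMetadata::new(1))]);
    remote_timeline.add_upload_failures([(PathBuf::from("failed"), LayerFileMetadata::new(2))]);
    assert_eq!(remote_timeline.stored_files().len(), 1);

    remote_timeline.remove_layers(&HashSet::from([
        PathBuf::from("uploaded"),
        PathBuf::from("failed"),
    ]));
    assert!(remote_timeline.stored_files().is_empty());
    assert!(remote_timeline.missing_layers.is_empty());
}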
|
||||
|
||||
/// Metadata gathered for each of the layer files.
|
||||
///
|
||||
/// Fields have to be `Option`s because remote [`IndexPart`]s can be from a different version, which
/// might have less or more metadata depending on whether we are upgrading or rolling back an upgrade.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
|
||||
#[cfg_attr(test, derive(Default))]
|
||||
pub struct LayerFileMetadata {
|
||||
file_size: Option<u64>,
|
||||
}
|
||||
|
||||
impl From<&'_ IndexLayerMetadata> for LayerFileMetadata {
|
||||
fn from(other: &IndexLayerMetadata) -> Self {
|
||||
LayerFileMetadata {
|
||||
file_size: other.file_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl LayerFileMetadata {
|
||||
pub fn new(file_size: u64) -> Self {
|
||||
LayerFileMetadata {
|
||||
file_size: Some(file_size),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn file_size(&self) -> Option<u64> {
|
||||
self.file_size
|
||||
}
|
||||
|
||||
/// Metadata has holes due to version upgrades. This method is called to upgrade self with the
|
||||
/// other value.
|
||||
///
|
||||
/// This is called on the possibly outdated version.
|
||||
pub fn merge(&mut self, other: &Self) {
|
||||
self.file_size = other.file_size.or(self.file_size);
|
||||
}
|
||||
}
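// Illustrative sketch (not part of the original code): `merge` fills in metadata
// holes left by an older index version without discarding newer information.
#[cfg(test)]
#[test]
fn layer_file_metadata_merge_sketch() {
    // An entry restored from a v0 index part has no file size recorded.
    let mut restored = LayerFileMetadata::default();
    // Merging in metadata gathered during a download fills the hole...
    restored.merge(&LayerFileMetadata::new(25_600_000));
    assert_eq!(restored.file_size(), Some(25_600_000));
    // ...and merging an empty value afterwards does not erase what is known.
    restored.merge(&LayerFileMetadata::default());
    assert_eq!(restored.file_size(), Some(25_600_000));
}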
|
||||
|
||||
/// Part of the remote index, corresponding to a certain timeline.
|
||||
/// Contains the data about all files in the timeline, present remotely and its metadata.
|
||||
///
|
||||
/// This type needs to be backwards and forwards compatible. When changing the fields,
|
||||
/// remember to add a test case for the changed version.
|
||||
#[serde_as]
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
|
||||
pub struct IndexPart {
|
||||
/// Debugging aid describing the version of this type.
|
||||
#[serde(default)]
|
||||
version: usize,
|
||||
|
||||
/// Each of the layers present on remote storage.
|
||||
///
|
||||
/// Additional metadata might exist in `layer_metadata`.
|
||||
timeline_layers: HashSet<RelativePath>,
|
||||
|
||||
/// Currently not really used by the pageserver;
/// present to manually keep track of the layer files that the pageserver might never retrieve.
///
/// Such "holes" might appear if any upload task was evicted on an error threshold:
/// such a layer will only be rescheduled for upload on pageserver restart.
|
||||
missing_layers: HashSet<RelativePath>,
|
||||
|
||||
/// Per layer file metadata, which can be present for a present or missing layer file.
|
||||
///
|
||||
/// Older versions of `IndexPart` will not have this property or will have only a part of the metadata
/// that the latest version stores.
|
||||
#[serde(default)]
|
||||
layer_metadata: HashMap<RelativePath, IndexLayerMetadata>,
|
||||
|
||||
#[serde_as(as = "DisplayFromStr")]
|
||||
disk_consistent_lsn: Lsn,
|
||||
metadata_bytes: Vec<u8>,
|
||||
}
|
||||
|
||||
impl IndexPart {
|
||||
/// When adding or modifying any parts of `IndexPart`, increment the version so that it can be
|
||||
/// used to understand later versions.
|
||||
///
|
||||
/// Version is currently informative only.
|
||||
const LATEST_VERSION: usize = 1;
|
||||
pub const FILE_NAME: &'static str = "index_part.json";
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn new(
|
||||
timeline_layers: HashSet<RelativePath>,
|
||||
missing_layers: HashSet<RelativePath>,
|
||||
disk_consistent_lsn: Lsn,
|
||||
metadata_bytes: Vec<u8>,
|
||||
) -> Self {
|
||||
Self {
|
||||
version: Self::LATEST_VERSION,
|
||||
timeline_layers,
|
||||
missing_layers,
|
||||
layer_metadata: HashMap::default(),
|
||||
disk_consistent_lsn,
|
||||
metadata_bytes,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn missing_files(&self) -> &HashSet<RelativePath> {
|
||||
&self.missing_layers
|
||||
}
|
||||
|
||||
pub fn from_remote_timeline(
|
||||
timeline_path: &Path,
|
||||
remote_timeline: RemoteTimeline,
|
||||
) -> anyhow::Result<Self> {
|
||||
let metadata_bytes = remote_timeline.metadata.to_bytes()?;
|
||||
|
||||
let mut layer_metadata = HashMap::new();
|
||||
|
||||
let mut missing_layers = HashSet::new();
|
||||
|
||||
separate_paths_and_metadata(
|
||||
timeline_path,
|
||||
&remote_timeline.missing_layers,
|
||||
&mut missing_layers,
|
||||
&mut layer_metadata,
|
||||
)
|
||||
.context("Failed to convert missing layers' paths to relative ones")?;
|
||||
|
||||
let mut timeline_layers = HashSet::new();
|
||||
|
||||
separate_paths_and_metadata(
|
||||
timeline_path,
|
||||
&remote_timeline.timeline_layers,
|
||||
&mut timeline_layers,
|
||||
&mut layer_metadata,
|
||||
)
|
||||
.context("Failed to convert timeline layers' paths to relative ones")?;
|
||||
|
||||
Ok(Self {
|
||||
version: Self::LATEST_VERSION,
|
||||
timeline_layers,
|
||||
missing_layers,
|
||||
layer_metadata,
|
||||
disk_consistent_lsn: remote_timeline.metadata.disk_consistent_lsn(),
|
||||
metadata_bytes,
|
||||
})
|
||||
}
|
||||
}
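// Illustrative sketch (not part of the original code): a stripped-down demonstration
// of the backward compatibility noted above. `version` and `layer_metadata` carry
// `#[serde(default)]`, so an index part written by an older pageserver still
// deserializes and the missing fields fall back to their defaults.
#[cfg(test)]
#[test]
fn index_part_serde_defaults_sketch() {
    let old_format = r#"{
        "timeline_layers": [],
        "missing_layers": [],
        "disk_consistent_lsn": "0/0",
        "metadata_bytes": []
    }"#;
    let part: IndexPart = serde_json::from_str(old_format).unwrap();
    assert_eq!(part.version, 0, "absent version falls back to 0");
    assert!(part.layer_metadata.is_empty(), "absent metadata map falls back to empty");
}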
|
||||
|
||||
/// Serialized form of [`LayerFileMetadata`].
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)]
|
||||
pub struct IndexLayerMetadata {
|
||||
file_size: Option<u64>,
|
||||
}
|
||||
|
||||
impl From<&'_ LayerFileMetadata> for IndexLayerMetadata {
|
||||
fn from(other: &'_ LayerFileMetadata) -> Self {
|
||||
IndexLayerMetadata {
|
||||
file_size: other.file_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn separate_paths_and_metadata(
|
||||
timeline_path: &Path,
|
||||
input: &HashMap<PathBuf, LayerFileMetadata>,
|
||||
output: &mut HashSet<RelativePath>,
|
||||
layer_metadata: &mut HashMap<RelativePath, IndexLayerMetadata>,
|
||||
) -> anyhow::Result<()> {
|
||||
for (path, metadata) in input {
|
||||
let rel_path = RelativePath::new(timeline_path, path)?;
|
||||
let metadata = IndexLayerMetadata::from(metadata);
|
||||
|
||||
layer_metadata.insert(rel_path.clone(), metadata);
|
||||
output.insert(rel_path);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
use super::*;
|
||||
use crate::tenant::harness::{TenantHarness, TIMELINE_ID};
|
||||
use crate::DEFAULT_PG_VERSION;
|
||||
|
||||
#[test]
|
||||
fn index_part_conversion() {
|
||||
let harness = TenantHarness::create("index_part_conversion").unwrap();
|
||||
let timeline_path = harness.timeline_path(&TIMELINE_ID);
|
||||
let metadata = TimelineMetadata::new(
|
||||
Lsn(5).align(),
|
||||
Some(Lsn(4)),
|
||||
None,
|
||||
Lsn(3),
|
||||
Lsn(2),
|
||||
Lsn(1),
|
||||
DEFAULT_PG_VERSION,
|
||||
);
|
||||
let remote_timeline = RemoteTimeline {
|
||||
timeline_layers: HashMap::from([
|
||||
(timeline_path.join("layer_1"), LayerFileMetadata::new(1)),
|
||||
(timeline_path.join("layer_2"), LayerFileMetadata::new(2)),
|
||||
]),
|
||||
missing_layers: HashMap::from([
|
||||
(timeline_path.join("missing_1"), LayerFileMetadata::new(3)),
|
||||
(timeline_path.join("missing_2"), LayerFileMetadata::new(4)),
|
||||
]),
|
||||
metadata: metadata.clone(),
|
||||
awaits_download: false,
|
||||
};
|
||||
|
||||
let index_part = IndexPart::from_remote_timeline(&timeline_path, remote_timeline.clone())
|
||||
.expect("Correct remote timeline should be convertible to index part");
|
||||
|
||||
assert_eq!(
|
||||
index_part.timeline_layers.iter().collect::<BTreeSet<_>>(),
|
||||
BTreeSet::from([
|
||||
&RelativePath("layer_1".to_string()),
|
||||
&RelativePath("layer_2".to_string())
|
||||
]),
|
||||
"Index part should have all remote timeline layers after the conversion"
|
||||
);
|
||||
assert_eq!(
|
||||
index_part.missing_layers.iter().collect::<BTreeSet<_>>(),
|
||||
BTreeSet::from([
|
||||
&RelativePath("missing_1".to_string()),
|
||||
&RelativePath("missing_2".to_string())
|
||||
]),
|
||||
"Index part should have all missing remote timeline layers after the conversion"
|
||||
);
|
||||
assert_eq!(
|
||||
index_part.disk_consistent_lsn,
|
||||
metadata.disk_consistent_lsn(),
|
||||
"Index part should have disk consistent lsn from the timeline"
|
||||
);
|
||||
assert_eq!(
|
||||
index_part.metadata_bytes,
|
||||
metadata
|
||||
.to_bytes()
|
||||
.expect("Failed to serialize correct metadata into bytes"),
|
||||
"Index part should have all missing remote timeline layers after the conversion"
|
||||
);
|
||||
|
||||
let restored_timeline = RemoteTimeline::from_index_part(&timeline_path, index_part)
|
||||
.expect("Correct index part should be convertible to remote timeline");
|
||||
|
||||
let original_metadata = &remote_timeline.metadata;
|
||||
let restored_metadata = &restored_timeline.metadata;
|
||||
// we have to compare the metadata this way, since its header is different after creation and restoration,
|
||||
// but that is now considered ok.
|
||||
assert_eq!(
|
||||
original_metadata.disk_consistent_lsn(),
|
||||
restored_metadata.disk_consistent_lsn(),
|
||||
"remote timeline -> index part -> remote timeline conversion should not alter metadata"
|
||||
);
|
||||
assert_eq!(
|
||||
original_metadata.prev_record_lsn(),
|
||||
restored_metadata.prev_record_lsn(),
|
||||
"remote timeline -> index part -> remote timeline conversion should not alter metadata"
|
||||
);
|
||||
assert_eq!(
|
||||
original_metadata.ancestor_timeline(),
|
||||
restored_metadata.ancestor_timeline(),
|
||||
"remote timeline -> index part -> remote timeline conversion should not alter metadata"
|
||||
);
|
||||
assert_eq!(
|
||||
original_metadata.ancestor_lsn(),
|
||||
restored_metadata.ancestor_lsn(),
|
||||
"remote timeline -> index part -> remote timeline conversion should not alter metadata"
|
||||
);
|
||||
assert_eq!(
|
||||
original_metadata.latest_gc_cutoff_lsn(),
|
||||
restored_metadata.latest_gc_cutoff_lsn(),
|
||||
"remote timeline -> index part -> remote timeline conversion should not alter metadata"
|
||||
);
|
||||
assert_eq!(
|
||||
original_metadata.initdb_lsn(),
|
||||
restored_metadata.initdb_lsn(),
|
||||
"remote timeline -> index part -> remote timeline conversion should not alter metadata"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
remote_timeline.awaits_download, restored_timeline.awaits_download,
|
||||
"remote timeline -> index part -> remote timeline conversion should not loose download flag"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
remote_timeline
|
||||
.timeline_layers
|
||||
.into_iter()
|
||||
.collect::<BTreeSet<_>>(),
|
||||
restored_timeline
|
||||
.timeline_layers
|
||||
.into_iter()
|
||||
.collect::<BTreeSet<_>>(),
|
||||
"remote timeline -> index part -> remote timeline conversion should not loose layer data"
|
||||
);
|
||||
assert_eq!(
|
||||
remote_timeline
|
||||
.missing_layers
|
||||
.into_iter()
|
||||
.collect::<BTreeSet<_>>(),
|
||||
restored_timeline
|
||||
.missing_layers
|
||||
.into_iter()
|
||||
.collect::<BTreeSet<_>>(),
|
||||
"remote timeline -> index part -> remote timeline conversion should not loose missing file data"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn index_part_conversion_negatives() {
|
||||
let harness = TenantHarness::create("index_part_conversion_negatives").unwrap();
|
||||
let timeline_path = harness.timeline_path(&TIMELINE_ID);
|
||||
let metadata = TimelineMetadata::new(
|
||||
Lsn(5).align(),
|
||||
Some(Lsn(4)),
|
||||
None,
|
||||
Lsn(3),
|
||||
Lsn(2),
|
||||
Lsn(1),
|
||||
DEFAULT_PG_VERSION,
|
||||
);
|
||||
|
||||
let conversion_result = IndexPart::from_remote_timeline(
|
||||
&timeline_path,
|
||||
RemoteTimeline {
|
||||
timeline_layers: HashMap::from([
|
||||
(PathBuf::from("bad_path"), LayerFileMetadata::new(1)),
|
||||
(timeline_path.join("layer_2"), LayerFileMetadata::new(2)),
|
||||
]),
|
||||
missing_layers: HashMap::from([
|
||||
(timeline_path.join("missing_1"), LayerFileMetadata::new(3)),
|
||||
(timeline_path.join("missing_2"), LayerFileMetadata::new(4)),
|
||||
]),
|
||||
metadata: metadata.clone(),
|
||||
awaits_download: false,
|
||||
},
|
||||
);
|
||||
assert!(conversion_result.is_err(), "Should not be able to convert metadata with layer paths that are not in the timeline directory");
|
||||
|
||||
let conversion_result = IndexPart::from_remote_timeline(
|
||||
&timeline_path,
|
||||
RemoteTimeline {
|
||||
timeline_layers: HashMap::from([
|
||||
(timeline_path.join("layer_1"), LayerFileMetadata::new(1)),
|
||||
(timeline_path.join("layer_2"), LayerFileMetadata::new(2)),
|
||||
]),
|
||||
missing_layers: HashMap::from([
|
||||
(PathBuf::from("bad_path"), LayerFileMetadata::new(3)),
|
||||
(timeline_path.join("missing_2"), LayerFileMetadata::new(4)),
|
||||
]),
|
||||
metadata,
|
||||
awaits_download: false,
|
||||
},
|
||||
);
|
||||
assert!(conversion_result.is_err(), "Should not be able to convert metadata with missing layer paths that are not in the timeline directory");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn v0_indexpart_is_parsed() {
|
||||
let example = r#"{
|
||||
"timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"],
|
||||
"missing_layers":["not_a_real_layer_but_adding_coverage"],
|
||||
"disk_consistent_lsn":"0/16960E8",
|
||||
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
|
||||
}"#;
|
||||
|
||||
let expected = IndexPart {
|
||||
version: 0,
|
||||
timeline_layers: [RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned())].into_iter().collect(),
|
||||
missing_layers: [RelativePath("not_a_real_layer_but_adding_coverage".to_owned())].into_iter().collect(),
|
||||
layer_metadata: HashMap::default(),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
metadata_bytes: [113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0].to_vec(),
|
||||
};
|
||||
|
||||
let part = serde_json::from_str::<IndexPart>(example).unwrap();
|
||||
assert_eq!(part, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn v1_indexpart_is_parsed() {
|
||||
let example = r#"{
|
||||
"version":1,
|
||||
"timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"],
|
||||
"missing_layers":["not_a_real_layer_but_adding_coverage"],
|
||||
"layer_metadata":{
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
|
||||
"not_a_real_layer_but_adding_coverage": { "file_size": 9007199254741001 }
|
||||
},
|
||||
"disk_consistent_lsn":"0/16960E8",
|
||||
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
|
||||
}"#;
|
||||
|
||||
let expected = IndexPart {
|
||||
// note this is not verified, could be anything, but exists for humans debugging; could be the git version instead?
|
||||
version: 1,
|
||||
timeline_layers: [RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned())].into_iter().collect(),
|
||||
missing_layers: [RelativePath("not_a_real_layer_but_adding_coverage".to_owned())].into_iter().collect(),
|
||||
layer_metadata: HashMap::from([
|
||||
(RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned()), IndexLayerMetadata {
|
||||
file_size: Some(25600000),
|
||||
}),
|
||||
(RelativePath("not_a_real_layer_but_adding_coverage".to_owned()), IndexLayerMetadata {
|
||||
// serde_json should always parse this but this might be a double with jq for
|
||||
// example.
|
||||
file_size: Some(9007199254741001),
|
||||
})
|
||||
]),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
metadata_bytes: [113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0].to_vec(),
|
||||
};
|
||||
|
||||
let part = serde_json::from_str::<IndexPart>(example).unwrap();
|
||||
assert_eq!(part, expected);
|
||||
}
|
||||
}
|
||||
@@ -1,479 +0,0 @@
|
||||
//! Timeline synchronization logic to compress and upload to the remote storage all new timeline files from the checkpoints.
|
||||
|
||||
use std::{fmt::Debug, path::PathBuf};
|
||||
|
||||
use anyhow::Context;
|
||||
use futures::stream::{FuturesUnordered, StreamExt};
|
||||
use remote_storage::GenericRemoteStorage;
|
||||
use tokio::fs;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use utils::id::TenantTimelineId;
|
||||
|
||||
use super::{
|
||||
index::{IndexPart, RemoteTimeline},
|
||||
LayersUpload, SyncData, SyncQueue,
|
||||
};
|
||||
use crate::metrics::NO_LAYERS_UPLOAD;
|
||||
use crate::{config::PageServerConf, storage_sync::SyncTask};
|
||||
|
||||
/// Serializes and uploads the given index part data to the remote storage.
|
||||
pub(super) async fn upload_index_part(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &GenericRemoteStorage,
|
||||
sync_id: TenantTimelineId,
|
||||
index_part: IndexPart,
|
||||
) -> anyhow::Result<()> {
|
||||
let index_part_bytes = serde_json::to_vec(&index_part)
|
||||
.context("Failed to serialize index part file into bytes")?;
|
||||
let index_part_size = index_part_bytes.len();
|
||||
let index_part_bytes = tokio::io::BufReader::new(std::io::Cursor::new(index_part_bytes));
|
||||
|
||||
let index_part_path = conf
|
||||
.metadata_path(sync_id.timeline_id, sync_id.tenant_id)
|
||||
.with_file_name(IndexPart::FILE_NAME);
|
||||
storage
|
||||
.upload_storage_object(
|
||||
Box::new(index_part_bytes),
|
||||
index_part_size,
|
||||
&index_part_path,
|
||||
)
|
||||
.await
|
||||
.with_context(|| format!("Failed to upload index part for '{sync_id}'"))
|
||||
}
|
||||
|
||||
/// Timeline upload result, with extra data, needed for uploading.
|
||||
#[derive(Debug)]
|
||||
pub(super) enum UploadedTimeline {
|
||||
/// Upload failed due to some error, the upload task is rescheduled for another retry.
|
||||
FailedAndRescheduled(anyhow::Error),
|
||||
/// No issues happened during the upload, all task files were put into the remote storage.
|
||||
Successful(SyncData<LayersUpload>),
|
||||
}
|
||||
|
||||
/// Attempts to upload given layer files.
|
||||
/// No extra checks for overlapping files are made and any files that are already present remotely will be overwritten if submitted during the upload.
|
||||
///
|
||||
/// On an error, bumps the retries count and reschedules the entire task.
|
||||
pub(super) async fn upload_timeline_layers<'a>(
|
||||
storage: &'a GenericRemoteStorage,
|
||||
sync_queue: &SyncQueue,
|
||||
remote_timeline: Option<&'a RemoteTimeline>,
|
||||
sync_id: TenantTimelineId,
|
||||
mut upload_data: SyncData<LayersUpload>,
|
||||
) -> UploadedTimeline {
|
||||
let upload = &mut upload_data.data;
|
||||
let new_upload_lsn = upload
|
||||
.metadata
|
||||
.as_ref()
|
||||
.map(|meta| meta.disk_consistent_lsn());
|
||||
|
||||
let already_uploaded_layers = remote_timeline
|
||||
.map(|timeline| {
|
||||
timeline
|
||||
.stored_files()
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect::<std::collections::HashSet<_>>()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
let layers_to_upload = upload
|
||||
.layers_to_upload
|
||||
.iter()
|
||||
.filter_map(|(k, v)| {
|
||||
if !already_uploaded_layers.contains(k) {
|
||||
Some((k.to_owned(), v.to_owned()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if layers_to_upload.is_empty() {
|
||||
debug!("No layers to upload after filtering, aborting");
|
||||
NO_LAYERS_UPLOAD
|
||||
.with_label_values(&[
|
||||
&sync_id.tenant_id.to_string(),
|
||||
&sync_id.timeline_id.to_string(),
|
||||
])
|
||||
.inc();
|
||||
return UploadedTimeline::Successful(upload_data);
|
||||
}
|
||||
|
||||
debug!("Layers to upload: {layers_to_upload:?}");
|
||||
info!(
|
||||
"Uploading {} timeline layers, new lsn: {new_upload_lsn:?}",
|
||||
layers_to_upload.len(),
|
||||
);
|
||||
|
||||
let mut upload_tasks = layers_to_upload
|
||||
.into_iter()
|
||||
.map(|(source_path, known_metadata)| async move {
|
||||
let source_file = match fs::File::open(&source_path).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to upen a source file for layer '{}'",
|
||||
source_path.display()
|
||||
)
|
||||
}) {
|
||||
Ok(file) => file,
|
||||
Err(e) => return Err(UploadError::MissingLocalFile(source_path, e)),
|
||||
};
|
||||
|
||||
let fs_size = source_file
|
||||
.metadata()
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to get the source file metadata for layer '{}'",
|
||||
source_path.display()
|
||||
)
|
||||
})
|
||||
.map_err(UploadError::Other)?
|
||||
.len();
|
||||
|
||||
// FIXME: this looks bad
|
||||
if let Some(metadata_size) = known_metadata.file_size() {
|
||||
if metadata_size != fs_size {
|
||||
return Err(UploadError::Other(anyhow::anyhow!(
|
||||
"File {source_path:?} has its current FS size {fs_size} diferent from initially determined {metadata_size}"
|
||||
)));
|
||||
}
|
||||
} else {
|
||||
// this is a silly state we would like to avoid
|
||||
}
|
||||
|
||||
let fs_size = usize::try_from(fs_size).with_context(|| format!("File {source_path:?} size {fs_size} could not be converted to usize"))
|
||||
.map_err(UploadError::Other)?;
|
||||
|
||||
match storage
|
||||
.upload_storage_object(Box::new(source_file), fs_size, &source_path)
|
||||
.await
|
||||
.with_context(|| format!("Failed to upload layer file for {sync_id}"))
|
||||
{
|
||||
Ok(()) => Ok(source_path),
|
||||
Err(e) => Err(UploadError::MissingLocalFile(source_path, e)),
|
||||
}
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
|
||||
let mut errors = Vec::new();
|
||||
while let Some(upload_result) = upload_tasks.next().await {
|
||||
match upload_result {
|
||||
Ok(uploaded_path) => {
|
||||
let metadata = upload
|
||||
.layers_to_upload
|
||||
.remove(&uploaded_path)
|
||||
.expect("metadata should always exist, assuming no double uploads");
|
||||
upload.uploaded_layers.insert(uploaded_path, metadata);
|
||||
}
|
||||
Err(e) => match e {
|
||||
UploadError::Other(e) => {
|
||||
error!("Failed to upload a layer for timeline {sync_id}: {e:?}");
|
||||
errors.push(format!("{e:#}"));
|
||||
}
|
||||
UploadError::MissingLocalFile(source_path, e) => {
|
||||
if source_path.exists() {
|
||||
error!("Failed to upload a layer for timeline {sync_id}: {e:?}");
|
||||
errors.push(format!("{e:#}"));
|
||||
} else {
|
||||
// We have run the upload sync task, but the file we wanted to upload is gone.
|
||||
// This is "fine" due the asynchronous nature of the sync loop: it only reacts to events and might need to
|
||||
// retry the upload tasks, if S3 or network is down: but during this time, pageserver might still operate and
|
||||
// run compaction/gc tasks, removing redundant files from disk.
|
||||
// It's not good to pause GC/compaction because of those and we would rather skip such uploads.
|
||||
//
|
||||
// Yet absence of such files might also mean that the timeline metadata file was updated (GC moves the Lsn forward, for instance).
|
||||
// We don't try to read a more recent version, since it could contain `disk_consistent_lsn` that does not have its upload finished yet.
|
||||
// This will create "missing" layers and make data inconsistent.
|
||||
// Instead, we only update the metadata when it was submitted in an upload task as a checkpoint result.
|
||||
upload.layers_to_upload.remove(&source_path);
|
||||
warn!(
|
||||
"Missing locally a layer file {} scheduled for upload, skipping",
|
||||
source_path.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if errors.is_empty() {
|
||||
info!("Successfully uploaded all layers");
|
||||
UploadedTimeline::Successful(upload_data)
|
||||
} else {
|
||||
debug!("Reenqueuing failed upload task for timeline {sync_id}");
|
||||
upload_data.retries += 1;
|
||||
sync_queue.push(sync_id, SyncTask::Upload(upload_data));
|
||||
UploadedTimeline::FailedAndRescheduled(anyhow::anyhow!(
|
||||
"Errors appeared during layer uploads: {:?}",
|
||||
errors
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
enum UploadError {
|
||||
MissingLocalFile(PathBuf, anyhow::Error),
|
||||
Other(anyhow::Error),
|
||||
}
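// Illustrative sketch (not part of the original code): how a caller is expected to
// branch on the upload outcome. A successful upload carries the updated sync data
// forward, while a failed one has already been re-queued by upload_timeline_layers.
#[cfg(test)]
#[allow(dead_code)]
fn handle_upload_outcome_sketch(outcome: UploadedTimeline) -> Option<SyncData<LayersUpload>> {
    match outcome {
        UploadedTimeline::Successful(upload_data) => Some(upload_data),
        UploadedTimeline::FailedAndRescheduled(e) => {
            warn!("upload failed and was rescheduled: {e:#}");
            None
        }
    }
}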
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{
|
||||
collections::{BTreeSet, HashSet},
|
||||
num::NonZeroUsize,
|
||||
};
|
||||
|
||||
use remote_storage::{LocalFs, RemoteStorage};
|
||||
use tempfile::tempdir;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
use crate::{
|
||||
storage_sync::{
|
||||
index::RelativePath,
|
||||
test_utils::{create_local_timeline, dummy_metadata},
|
||||
},
|
||||
tenant::harness::{TenantHarness, TIMELINE_ID},
|
||||
};
|
||||
|
||||
use super::{upload_index_part, *};
|
||||
|
||||
#[tokio::test]
|
||||
async fn regular_layer_upload() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("regular_layer_upload")?;
|
||||
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
|
||||
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
|
||||
|
||||
let layer_files = ["a", "b"];
|
||||
let storage = GenericRemoteStorage::new(LocalFs::new(
|
||||
tempdir()?.path().to_path_buf(),
|
||||
harness.conf.workdir.clone(),
|
||||
)?);
|
||||
let local_storage = storage.as_local().unwrap();
|
||||
let current_retries = 3;
|
||||
let metadata = dummy_metadata(Lsn(0x30));
|
||||
let local_timeline_path = harness.timeline_path(&TIMELINE_ID);
|
||||
let mut timeline_upload =
|
||||
create_local_timeline(&harness, TIMELINE_ID, &layer_files, metadata.clone()).await?;
|
||||
timeline_upload.metadata = None;
|
||||
|
||||
assert!(
|
||||
local_storage.list().await?.is_empty(),
|
||||
"Storage should be empty before any uploads are made"
|
||||
);
|
||||
|
||||
let upload_result = upload_timeline_layers(
|
||||
&storage,
|
||||
&sync_queue,
|
||||
None,
|
||||
sync_id,
|
||||
SyncData::new(current_retries, timeline_upload.clone()),
|
||||
)
|
||||
.await;
|
||||
|
||||
let upload_data = match upload_result {
|
||||
UploadedTimeline::Successful(upload_data) => upload_data,
|
||||
wrong_result => {
|
||||
panic!("Expected a successful upload for timeline, but got: {wrong_result:?}")
|
||||
}
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
current_retries, upload_data.retries,
|
||||
"On successful upload, retries are not expected to change"
|
||||
);
|
||||
let upload = &upload_data.data;
|
||||
assert!(
|
||||
upload.layers_to_upload.is_empty(),
|
||||
"Successful upload should have no layers left to upload"
|
||||
);
|
||||
assert_eq!(
|
||||
upload
|
||||
.uploaded_layers
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect::<BTreeSet<_>>(),
|
||||
layer_files
|
||||
.iter()
|
||||
.map(|layer_file| local_timeline_path.join(layer_file))
|
||||
.collect(),
|
||||
"Successful upload should have all layers uploaded"
|
||||
);
|
||||
assert_eq!(
|
||||
upload.metadata, None,
|
||||
"Successful upload without metadata should not have it returned either"
|
||||
);
|
||||
|
||||
let storage_files = local_storage.list().await?;
|
||||
assert_eq!(
|
||||
storage_files.len(),
|
||||
layer_files.len(),
|
||||
"All layers should be uploaded"
|
||||
);
|
||||
assert_eq!(
|
||||
storage_files
|
||||
.into_iter()
|
||||
.map(|storage_path| local_storage.local_path(&storage_path))
|
||||
.collect::<anyhow::Result<BTreeSet<_>>>()?,
|
||||
layer_files
|
||||
.into_iter()
|
||||
.map(|file| local_timeline_path.join(file))
|
||||
.collect(),
|
||||
"Uploaded files should match with the local ones"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Currently, GC can run between upload retries, removing local layers scheduled for upload. Test this scenario.
|
||||
#[tokio::test]
|
||||
async fn layer_upload_after_local_fs_update() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("layer_upload_after_local_fs_update")?;
|
||||
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
|
||||
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
|
||||
|
||||
let layer_files = ["a1", "b1"];
|
||||
let storage = GenericRemoteStorage::new(LocalFs::new(
|
||||
tempdir()?.path().to_owned(),
|
||||
harness.conf.workdir.clone(),
|
||||
)?);
|
||||
let local_storage = storage.as_local().unwrap();
|
||||
let current_retries = 5;
|
||||
let metadata = dummy_metadata(Lsn(0x40));
|
||||
|
||||
let local_timeline_path = harness.timeline_path(&TIMELINE_ID);
|
||||
let layers_to_upload = {
|
||||
let mut layers = layer_files.to_vec();
|
||||
layers.push("layer_to_remove");
|
||||
layers
|
||||
};
|
||||
let timeline_upload =
|
||||
create_local_timeline(&harness, TIMELINE_ID, &layers_to_upload, metadata.clone())
|
||||
.await?;
|
||||
assert!(
|
||||
local_storage.list().await?.is_empty(),
|
||||
"Storage should be empty before any uploads are made"
|
||||
);
|
||||
|
||||
fs::remove_file(local_timeline_path.join("layer_to_remove")).await?;
|
||||
|
||||
let upload_result = upload_timeline_layers(
|
||||
&storage,
|
||||
&sync_queue,
|
||||
None,
|
||||
sync_id,
|
||||
SyncData::new(current_retries, timeline_upload.clone()),
|
||||
)
|
||||
.await;
|
||||
|
||||
let upload_data = match upload_result {
|
||||
UploadedTimeline::Successful(upload_data) => upload_data,
|
||||
wrong_result => panic!(
|
||||
"Expected a successful after local fs upload for timeline, but got: {wrong_result:?}"
|
||||
),
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
current_retries, upload_data.retries,
|
||||
"On successful upload, retries are not expected to change"
|
||||
);
|
||||
let upload = &upload_data.data;
|
||||
assert!(
|
||||
upload.layers_to_upload.is_empty(),
|
||||
"Successful upload should have no layers left to upload, even those that were removed from the local fs"
|
||||
);
|
||||
assert_eq!(
|
||||
upload
|
||||
.uploaded_layers
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect::<BTreeSet<_>>(),
|
||||
layer_files
|
||||
.iter()
|
||||
.map(|layer_file| local_timeline_path.join(layer_file))
|
||||
.collect(),
|
||||
"Successful upload should have all layers uploaded"
|
||||
);
|
||||
assert_eq!(
|
||||
upload.metadata,
|
||||
Some(metadata),
|
||||
"Successful upload should not change its metadata"
|
||||
);
|
||||
|
||||
let storage_files = local_storage.list().await?;
|
||||
assert_eq!(
|
||||
storage_files.len(),
|
||||
layer_files.len(),
|
||||
"All layers should be uploaded"
|
||||
);
|
||||
assert_eq!(
|
||||
storage_files
|
||||
.into_iter()
|
||||
.map(|storage_path| local_storage.local_path(&storage_path))
|
||||
.collect::<anyhow::Result<BTreeSet<_>>>()?,
|
||||
layer_files
|
||||
.into_iter()
|
||||
.map(|file| local_timeline_path.join(file))
|
||||
.collect(),
|
||||
"Uploaded files should match with the local ones"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_upload_index_part() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("test_upload_index_part")?;
|
||||
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
|
||||
|
||||
let storage = GenericRemoteStorage::new(LocalFs::new(
|
||||
tempdir()?.path().to_owned(),
|
||||
harness.conf.workdir.clone(),
|
||||
)?);
|
||||
let local_storage = storage.as_local().unwrap();
|
||||
let metadata = dummy_metadata(Lsn(0x40));
|
||||
let local_timeline_path = harness.timeline_path(&TIMELINE_ID);
|
||||
|
||||
let index_part = IndexPart::new(
|
||||
HashSet::from([
|
||||
RelativePath::new(&local_timeline_path, local_timeline_path.join("one"))?,
|
||||
RelativePath::new(&local_timeline_path, local_timeline_path.join("two"))?,
|
||||
]),
|
||||
HashSet::from([RelativePath::new(
|
||||
&local_timeline_path,
|
||||
local_timeline_path.join("three"),
|
||||
)?]),
|
||||
metadata.disk_consistent_lsn(),
|
||||
metadata.to_bytes()?,
|
||||
);
|
||||
|
||||
assert!(
|
||||
local_storage.list().await?.is_empty(),
|
||||
"Storage should be empty before any uploads are made"
|
||||
);
|
||||
upload_index_part(harness.conf, &storage, sync_id, index_part.clone()).await?;
|
||||
|
||||
let storage_files = local_storage.list().await?;
|
||||
assert_eq!(
|
||||
storage_files.len(),
|
||||
1,
|
||||
"Should have only the index part file uploaded"
|
||||
);
|
||||
|
||||
let index_part_path = storage_files.first().unwrap();
|
||||
assert_eq!(
|
||||
index_part_path.object_name(),
|
||||
Some(IndexPart::FILE_NAME),
|
||||
"Remote index part should have the correct name"
|
||||
);
|
||||
let remote_index_part: IndexPart = serde_json::from_slice(
|
||||
&fs::read(local_storage.resolve_in_storage(index_part_path)?).await?,
|
||||
)?;
|
||||
assert_eq!(
|
||||
index_part, remote_index_part,
|
||||
"Remote index part should match the local one"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
pageserver/src/storage_sync2.rs (new file, 1256 lines; diff suppressed because it is too large)
pageserver/src/storage_sync2/delete.rs (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
//! Helper functions to delete files from remote storage with a RemoteStorage
|
||||
use anyhow::Context;
|
||||
use std::path::Path;
|
||||
use tracing::debug;
|
||||
|
||||
use remote_storage::GenericRemoteStorage;
|
||||
|
||||
pub(super) async fn delete_layer(
|
||||
storage: &GenericRemoteStorage,
|
||||
local_layer_path: &Path,
|
||||
) -> anyhow::Result<()> {
|
||||
fail::fail_point!("before-delete-layer", |_| {
|
||||
anyhow::bail!("failpoint before-delete-layer")
|
||||
});
|
||||
debug!(
|
||||
"Deleting layer from remote storage: {:?}",
|
||||
local_layer_path.display()
|
||||
);
|
||||
|
||||
let storage_path = storage
|
||||
.remote_object_id(local_layer_path)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to get the layer storage path for local path '{}'",
|
||||
local_layer_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
// XXX: If the deletion fails because the object didn't exist in the first place,
// it would be good to just issue a warning but consider it a success.
|
||||
// https://github.com/neondatabase/neon/issues/2934
|
||||
storage.delete(&storage_path).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to delete remote layer from storage at '{:?}'",
|
||||
storage_path
|
||||
)
|
||||
})
|
||||
}
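// Illustrative note (not part of the original code): the `before-delete-layer`
// failpoint above is meant to be toggled from tests to exercise the error path,
// assuming the `fail` crate's usual configuration API and that failpoints are
// compiled in, e.g.:
//
//     fail::cfg("before-delete-layer", "return").unwrap(); // make delete_layer bail early
//     fail::remove("before-delete-layer");                 // restore normal behaviour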
|
||||
pageserver/src/storage_sync2/download.rs (new file, 257 lines)
@@ -0,0 +1,257 @@
|
||||
//! Helper functions to download files from remote storage with a RemoteStorage
|
||||
use std::collections::HashSet;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{bail, Context};
|
||||
use futures::stream::{FuturesUnordered, StreamExt};
|
||||
use tokio::fs;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tracing::debug;
|
||||
|
||||
use crate::config::PageServerConf;
|
||||
use crate::storage_sync::index::LayerFileMetadata;
|
||||
use remote_storage::{DownloadError, GenericRemoteStorage};
|
||||
use utils::crashsafe::path_with_suffix_extension;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
|
||||
use super::index::IndexPart;
|
||||
use super::RelativePath;
|
||||
|
||||
async fn fsync_path(path: impl AsRef<std::path::Path>) -> Result<(), std::io::Error> {
|
||||
fs::File::open(path).await?.sync_all().await
|
||||
}
|
||||
|
||||
///
|
||||
/// If 'metadata' is given, we will validate that the downloaded file's size matches that
|
||||
/// in the metadata. (In the future, we might do more cross-checks, like CRC validation)
|
||||
///
|
||||
/// Returns the size of the downloaded file.
|
||||
pub async fn download_layer_file<'a>(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &'a GenericRemoteStorage,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
path: &'a RelativePath,
|
||||
layer_metadata: &'a LayerFileMetadata,
|
||||
) -> anyhow::Result<u64> {
|
||||
let timeline_path = conf.timeline_path(&timeline_id, &tenant_id);
|
||||
|
||||
let local_path = path.to_local_path(&timeline_path);
|
||||
|
||||
let layer_storage_path = storage.remote_object_id(&local_path).with_context(|| {
|
||||
format!(
|
||||
"Failed to get the layer storage path for local path '{}'",
|
||||
local_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
// Perform a rename inspired by durable_rename from file_utils.c.
|
||||
// The sequence:
|
||||
// write(tmp)
|
||||
// fsync(tmp)
|
||||
// rename(tmp, new)
|
||||
// fsync(new)
|
||||
// fsync(parent)
|
||||
// For more context about durable_rename check this email from postgres mailing list:
|
||||
// https://www.postgresql.org/message-id/56583BDD.9060302@2ndquadrant.com
|
||||
// If pageserver crashes the temp file will be deleted on startup and re-downloaded.
|
||||
let temp_file_path = path_with_suffix_extension(&local_path, TEMP_DOWNLOAD_EXTENSION);
|
||||
|
||||
// TODO: this doesn't use the cached fd for some reason?
|
||||
let mut destination_file = fs::File::create(&temp_file_path).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to create a destination file for layer '{}'",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})?;
|
||||
let mut download = storage
|
||||
.download(&layer_storage_path)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to open a download stream for layer with remote storage path '{layer_storage_path:?}'"
|
||||
)
|
||||
})?;
|
||||
let bytes_amount = tokio::io::copy(&mut download.download_stream, &mut destination_file).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to download layer with remote storage path '{layer_storage_path:?}' into file '{}'", temp_file_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
// Tokio doc here: https://docs.rs/tokio/1.17.0/tokio/fs/struct.File.html states that:
// A file will not be closed immediately when it goes out of scope if there are any IO operations
// that have not yet completed. To ensure that a file is closed immediately when it is dropped,
// you should call flush before dropping it.
//
// From the tokio code I see that it waits for pending operations to complete. There shouldn't be any because
// we assume that the `destination_file` file is fully written, i.e. there are no pending .write(...).await operations.
// But for additional safety, let's check/wait for any pending operations.
|
||||
destination_file.flush().await.with_context(|| {
|
||||
format!(
|
||||
"failed to flush source file at {}",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
match layer_metadata.file_size() {
|
||||
Some(expected) if expected != bytes_amount => {
|
||||
anyhow::bail!(
"According to the layer file metadata, {expected} bytes should have been downloaded, but {bytes_amount} bytes were downloaded into file '{}'",
temp_file_path.display()
);
|
||||
}
|
||||
Some(_) | None => {
|
||||
// matches, or upgrading from an earlier IndexPart version
|
||||
}
|
||||
}
|
||||
|
||||
// not using sync_data because it can lose file size update
|
||||
destination_file.sync_all().await.with_context(|| {
|
||||
format!(
|
||||
"failed to fsync source file at {}",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})?;
|
||||
drop(destination_file);
|
||||
|
||||
fail::fail_point!("remote-storage-download-pre-rename", |_| {
|
||||
bail!("remote-storage-download-pre-rename failpoint triggered")
|
||||
});
|
||||
|
||||
fs::rename(&temp_file_path, &local_path).await?;
|
||||
|
||||
fsync_path(&local_path)
|
||||
.await
|
||||
.with_context(|| format!("Could not fsync layer file {}", local_path.display(),))?;
|
||||
|
||||
tracing::info!("download complete: {}", local_path.display());
|
||||
|
||||
Ok(bytes_amount)
|
||||
}
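The durable_rename-style sequence described in the comments of download_layer_file can be stated on its own. A minimal illustrative sketch with tokio::fs follows; it assumes a Linux-like filesystem where a directory handle opened with File::open can be fsynced, and the helper name is not part of this module.

use std::path::Path;
use tokio::io::AsyncWriteExt;

// Illustrative helper: write `bytes` to `final_path` durably, mirroring the
// write(tmp) -> fsync(tmp) -> rename(tmp, new) -> fsync(new) -> fsync(parent) steps.
async fn durable_write(final_path: &Path, bytes: &[u8]) -> std::io::Result<()> {
    let tmp_path = final_path.with_extension("temp_download");

    let mut tmp = tokio::fs::File::create(&tmp_path).await?; // write(tmp)
    tmp.write_all(bytes).await?;
    tmp.sync_all().await?; // fsync(tmp)
    drop(tmp);

    tokio::fs::rename(&tmp_path, final_path).await?; // rename(tmp, new)
    tokio::fs::File::open(final_path).await?.sync_all().await?; // fsync(new)

    if let Some(parent) = final_path.parent() {
        // fsync(parent) so the rename itself survives a crash
        tokio::fs::File::open(parent).await?.sync_all().await?;
    }
    Ok(())
}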
|
||||
|
||||
const TEMP_DOWNLOAD_EXTENSION: &str = "temp_download";
|
||||
|
||||
pub fn is_temp_download_file(path: &Path) -> bool {
|
||||
let extension = path.extension().map(|pname| {
|
||||
pname
|
||||
.to_str()
|
||||
.expect("paths passed to this function must be valid Rust strings")
|
||||
});
|
||||
match extension {
|
||||
Some(TEMP_DOWNLOAD_EXTENSION) => true,
|
||||
Some(_) => false,
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// List timelines of given tenant in remote storage
|
||||
pub async fn list_remote_timelines<'a>(
|
||||
storage: &'a GenericRemoteStorage,
|
||||
conf: &'static PageServerConf,
|
||||
tenant_id: TenantId,
|
||||
) -> anyhow::Result<Vec<(TimelineId, IndexPart)>> {
|
||||
let tenant_path = conf.timelines_path(&tenant_id);
|
||||
let tenant_storage_path = storage.remote_object_id(&tenant_path).with_context(|| {
|
||||
format!(
|
||||
"Failed to get tenant storage path for local path '{}'",
|
||||
tenant_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
let timelines = storage
|
||||
.list_prefixes(Some(&tenant_storage_path))
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to list tenant storage path {tenant_storage_path:?} to get remote timelines to download"
|
||||
)
|
||||
})?;
|
||||
|
||||
if timelines.is_empty() {
|
||||
anyhow::bail!("no timelines found on the remote storage")
|
||||
}
|
||||
|
||||
let mut timeline_ids = HashSet::new();
|
||||
let mut part_downloads = FuturesUnordered::new();
|
||||
|
||||
for timeline_remote_storage_key in timelines {
|
||||
let object_name = timeline_remote_storage_key.object_name().ok_or_else(|| {
|
||||
anyhow::anyhow!("failed to get timeline id for remote tenant {tenant_id}")
|
||||
})?;
|
||||
|
||||
let timeline_id: TimelineId = object_name.parse().with_context(|| {
|
||||
format!("failed to parse object name into timeline id '{object_name}'")
|
||||
})?;
|
||||
|
||||
// list_prefixes returns all files with the prefix. If we haven't seen this timeline ID
|
||||
// yet, launch a download task for it.
|
||||
if !timeline_ids.contains(&timeline_id) {
|
||||
timeline_ids.insert(timeline_id);
|
||||
let storage_clone = storage.clone();
|
||||
part_downloads.push(async move {
|
||||
(
|
||||
timeline_id,
|
||||
download_index_part(conf, &storage_clone, tenant_id, timeline_id).await,
|
||||
)
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for all the download tasks to complete.
|
||||
let mut timeline_parts = Vec::new();
|
||||
while let Some((timeline_id, part_upload_result)) = part_downloads.next().await {
|
||||
let index_part = part_upload_result
|
||||
.with_context(|| format!("Failed to fetch index part for timeline {timeline_id}"))?;
|
||||
|
||||
debug!("Successfully fetched index part for timeline {timeline_id}");
|
||||
timeline_parts.push((timeline_id, index_part));
|
||||
}
|
||||
Ok(timeline_parts)
|
||||
}
|
||||
|
||||
pub async fn download_index_part(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &GenericRemoteStorage,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
) -> Result<IndexPart, DownloadError> {
|
||||
let index_part_path = conf
|
||||
.metadata_path(timeline_id, tenant_id)
|
||||
.with_file_name(IndexPart::FILE_NAME);
|
||||
let part_storage_path = storage
|
||||
.remote_object_id(&index_part_path)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to get the index part storage path for local path '{}'",
|
||||
index_part_path.display()
|
||||
)
|
||||
})
|
||||
.map_err(DownloadError::BadInput)?;
|
||||
|
||||
let mut index_part_download = storage.download(&part_storage_path).await?;
|
||||
|
||||
let mut index_part_bytes = Vec::new();
|
||||
tokio::io::copy(
|
||||
&mut index_part_download.download_stream,
|
||||
&mut index_part_bytes,
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to download an index part into file '{}'",
|
||||
index_part_path.display()
|
||||
)
|
||||
})
|
||||
.map_err(DownloadError::Other)?;
|
||||
|
||||
let index_part: IndexPart = serde_json::from_slice(&index_part_bytes)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to deserialize index part file into file '{}'",
|
||||
index_part_path.display()
|
||||
)
|
||||
})
|
||||
.map_err(DownloadError::Other)?;
|
||||
|
||||
Ok(index_part)
|
||||
}
|
||||
pageserver/src/storage_sync2/index.rs (new file, 248 lines)
@@ -0,0 +1,248 @@
|
||||
//! In-memory index to track the tenant files on the remote storage.
//! Able to restore itself from the storage index parts, which are located in every timeline's remote directory and contain all data about
//! remote timeline layers and their metadata.
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use anyhow::{Context, Ok};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::{serde_as, DisplayFromStr};
|
||||
|
||||
use crate::tenant::metadata::TimelineMetadata;
|
||||
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
/// A part of the filesystem path, that needs a root to become a path again.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
#[serde(transparent)]
|
||||
pub struct RelativePath(String);
|
||||
|
||||
impl RelativePath {
|
||||
/// Attempts to strip off the base from path, producing a relative path or an error.
|
||||
pub fn from_local_path(timeline_path: &Path, path: &Path) -> anyhow::Result<RelativePath> {
|
||||
let relative = path.strip_prefix(timeline_path).with_context(|| {
|
||||
format!(
|
||||
"path '{}' is not relative to base '{}'",
|
||||
path.display(),
|
||||
timeline_path.display()
|
||||
)
|
||||
})?;
|
||||
Ok(Self::from_filename(relative))
|
||||
}
|
||||
|
||||
pub fn from_filename(path: &Path) -> RelativePath {
|
||||
RelativePath(path.to_string_lossy().to_string())
|
||||
}
|
||||
|
||||
pub fn to_local_path(&self, timeline_path: &Path) -> PathBuf {
|
||||
timeline_path.join(&self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// Metadata gathered for each of the layer files.
|
||||
///
|
||||
/// Fields have to be `Option`s because remote [`IndexPart`]s can come from different versions, which
/// might have less or more metadata depending on whether an upgrade is being applied or rolled back.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
|
||||
#[cfg_attr(test, derive(Default))]
|
||||
pub struct LayerFileMetadata {
|
||||
file_size: Option<u64>,
|
||||
}
|
||||
|
||||
impl From<&'_ IndexLayerMetadata> for LayerFileMetadata {
|
||||
fn from(other: &IndexLayerMetadata) -> Self {
|
||||
LayerFileMetadata {
|
||||
file_size: other.file_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl LayerFileMetadata {
|
||||
pub fn new(file_size: u64) -> Self {
|
||||
LayerFileMetadata {
|
||||
file_size: Some(file_size),
|
||||
}
|
||||
}
|
||||
|
||||
/// This is used to initialize the metadata for remote layers, for which
|
||||
/// the metadata was missing from the index part file.
|
||||
pub const MISSING: Self = LayerFileMetadata { file_size: None };
|
||||
|
||||
pub fn file_size(&self) -> Option<u64> {
|
||||
self.file_size
|
||||
}
|
||||
|
||||
/// Metadata has holes due to version upgrades. This method is called to upgrade self with the
|
||||
/// other value.
|
||||
///
|
||||
/// This is called on the possibly outdated version.
|
||||
pub fn merge(&mut self, other: &Self) {
|
||||
self.file_size = other.file_size.or(self.file_size);
|
||||
}
|
||||
}
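To make the merge semantics above concrete: the freshly loaded metadata wins wherever it has a value, and merging a MISSING value never erases known data. A small test-style sketch, built only from the API shown in this file (it would belong in a #[cfg(test)] module):

#[test]
fn merge_fills_in_missing_file_size() {
    // Older index parts may carry no size at all.
    let mut stale = LayerFileMetadata::MISSING;
    assert_eq!(stale.file_size(), None);

    // A newer index part knows the size; merging upgrades the stale copy.
    let fresh = LayerFileMetadata::new(25_600_000);
    stale.merge(&fresh);
    assert_eq!(stale.file_size(), Some(25_600_000));

    // Merging a MISSING value does not erase what we already know.
    stale.merge(&LayerFileMetadata::MISSING);
    assert_eq!(stale.file_size(), Some(25_600_000));
}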
|
||||
|
||||
/// In-memory representation of an `index_part.json` file
|
||||
///
|
||||
/// Contains the data about all files in the timeline, present remotely and its metadata.
|
||||
///
|
||||
/// This type needs to be backwards and forwards compatible. When changing the fields,
|
||||
/// remember to add a test case for the changed version.
|
||||
#[serde_as]
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
|
||||
pub struct IndexPart {
|
||||
/// Debugging aid describing the version of this type.
|
||||
#[serde(default)]
|
||||
version: usize,
|
||||
|
||||
/// Each of the layers present on remote storage.
|
||||
///
|
||||
/// Additional metadata might exist in `layer_metadata`.
|
||||
pub timeline_layers: HashSet<RelativePath>,
|
||||
|
||||
/// FIXME: unused field. This should be removed, but that changes the on-disk format,
|
||||
/// so we need to make sure we're backwards- (and maybe forwards-) compatible
|
||||
missing_layers: HashSet<RelativePath>,
|
||||
|
||||
/// Per layer file metadata, which can be present for a present or missing layer file.
|
||||
///
|
||||
/// Older versions of `IndexPart` will not have this property or have only a part of metadata
|
||||
/// that latest version stores.
|
||||
#[serde(default)]
|
||||
pub layer_metadata: HashMap<RelativePath, IndexLayerMetadata>,
|
||||
|
||||
// 'disk_consistent_lsn' is a copy of the 'disk_consistent_lsn' in the metadata.
|
||||
// It's duplicated here for convenience.
|
||||
#[serde_as(as = "DisplayFromStr")]
|
||||
pub disk_consistent_lsn: Lsn,
|
||||
metadata_bytes: Vec<u8>,
|
||||
}
|
||||
|
||||
impl IndexPart {
|
||||
/// When adding or modifying any parts of `IndexPart`, increment the version so that it can be
|
||||
/// used to understand later versions.
|
||||
///
|
||||
/// Version is currently informative only.
|
||||
const LATEST_VERSION: usize = 1;
|
||||
pub const FILE_NAME: &'static str = "index_part.json";
|
||||
|
||||
pub fn new(
|
||||
layers_and_metadata: HashMap<RelativePath, LayerFileMetadata>,
|
||||
disk_consistent_lsn: Lsn,
|
||||
metadata_bytes: Vec<u8>,
|
||||
) -> Self {
|
||||
let mut timeline_layers = HashSet::new();
|
||||
let mut layer_metadata = HashMap::new();
|
||||
|
||||
separate_paths_and_metadata(
|
||||
&layers_and_metadata,
|
||||
&mut timeline_layers,
|
||||
&mut layer_metadata,
|
||||
);
|
||||
|
||||
Self {
|
||||
version: Self::LATEST_VERSION,
|
||||
timeline_layers,
|
||||
missing_layers: HashSet::new(),
|
||||
layer_metadata,
|
||||
disk_consistent_lsn,
|
||||
metadata_bytes,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_metadata(&self) -> anyhow::Result<TimelineMetadata> {
|
||||
TimelineMetadata::from_bytes(&self.metadata_bytes)
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialized form of [`LayerFileMetadata`].
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)]
|
||||
pub struct IndexLayerMetadata {
|
||||
file_size: Option<u64>,
|
||||
}
|
||||
|
||||
impl From<&'_ LayerFileMetadata> for IndexLayerMetadata {
|
||||
fn from(other: &'_ LayerFileMetadata) -> Self {
|
||||
IndexLayerMetadata {
|
||||
file_size: other.file_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn separate_paths_and_metadata(
|
||||
input: &HashMap<RelativePath, LayerFileMetadata>,
|
||||
output: &mut HashSet<RelativePath>,
|
||||
layer_metadata: &mut HashMap<RelativePath, IndexLayerMetadata>,
|
||||
) {
|
||||
for (path, metadata) in input {
|
||||
let metadata = IndexLayerMetadata::from(metadata);
|
||||
layer_metadata.insert(path.clone(), metadata);
|
||||
output.insert(path.clone());
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn v0_indexpart_is_parsed() {
|
||||
let example = r#"{
|
||||
"timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"],
|
||||
"missing_layers":["not_a_real_layer_but_adding_coverage"],
|
||||
"disk_consistent_lsn":"0/16960E8",
|
||||
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
|
||||
}"#;
|
||||
|
||||
let expected = IndexPart {
|
||||
version: 0,
|
||||
timeline_layers: [RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned())].into_iter().collect(),
|
||||
missing_layers: [RelativePath("not_a_real_layer_but_adding_coverage".to_owned())].into_iter().collect(),
|
||||
layer_metadata: HashMap::default(),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
metadata_bytes: [113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0].to_vec(),
|
||||
};
|
||||
|
||||
let part = serde_json::from_str::<IndexPart>(example).unwrap();
|
||||
assert_eq!(part, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn v1_indexpart_is_parsed() {
|
||||
let example = r#"{
|
||||
"version":1,
|
||||
"timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"],
|
||||
"missing_layers":["not_a_real_layer_but_adding_coverage"],
|
||||
"layer_metadata":{
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
|
||||
"not_a_real_layer_but_adding_coverage": { "file_size": 9007199254741001 }
|
||||
},
|
||||
"disk_consistent_lsn":"0/16960E8",
|
||||
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
|
||||
}"#;
|
||||
|
||||
let expected = IndexPart {
|
||||
// note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead?
|
||||
version: 1,
|
||||
timeline_layers: [RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned())].into_iter().collect(),
|
||||
missing_layers: [RelativePath("not_a_real_layer_but_adding_coverage".to_owned())].into_iter().collect(),
|
||||
layer_metadata: HashMap::from([
|
||||
(RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned()), IndexLayerMetadata {
|
||||
file_size: Some(25600000),
|
||||
}),
|
||||
(RelativePath("not_a_real_layer_but_adding_coverage".to_owned()), IndexLayerMetadata {
|
||||
// serde_json should always parse this but this might be a double with jq for
|
||||
// example.
|
||||
file_size: Some(9007199254741001),
|
||||
})
|
||||
]),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
metadata_bytes: [113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0].to_vec(),
|
||||
};
|
||||
|
||||
let part = serde_json::from_str::<IndexPart>(example).unwrap();
|
||||
assert_eq!(part, expected);
|
||||
}
|
||||
}
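The tests above only parse hand-written JSON. A serde_json round trip is a cheap extra check of the forwards/backwards compatibility claim; the sketch below would sit in the same tests module (it relies on use super::*; like the tests above) and uses arbitrary values.

#[test]
fn index_part_roundtrips_through_json() {
    let mut layers = HashMap::new();
    layers.insert(
        RelativePath::from_filename(Path::new("some_layer_file")),
        LayerFileMetadata::new(4096),
    );

    let original = IndexPart::new(
        layers,
        "0/16960E8".parse::<Lsn>().unwrap(),
        vec![1, 2, 3], // stand-in for serialized TimelineMetadata bytes
    );

    let bytes = serde_json::to_vec(&original).unwrap();
    let restored: IndexPart = serde_json::from_slice(&bytes).unwrap();
    assert_eq!(original, restored);
}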
|
||||
pageserver/src/storage_sync2/upload.rs (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
//! Helper functions to upload files to remote storage with a RemoteStorage
|
||||
|
||||
use anyhow::{bail, Context};
|
||||
use fail::fail_point;
|
||||
use std::path::Path;
|
||||
use tokio::fs;
|
||||
|
||||
use super::index::IndexPart;
|
||||
use crate::config::PageServerConf;
|
||||
use crate::storage_sync::LayerFileMetadata;
|
||||
use remote_storage::GenericRemoteStorage;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
|
||||
/// Serializes and uploads the given index part data to the remote storage.
|
||||
pub(super) async fn upload_index_part<'a>(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &'a GenericRemoteStorage,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
index_part: &'a IndexPart,
|
||||
) -> anyhow::Result<()> {
|
||||
fail_point!("before-upload-index", |_| {
|
||||
bail!("failpoint before-upload-index")
|
||||
});
|
||||
let index_part_bytes = serde_json::to_vec(&index_part)
|
||||
.context("Failed to serialize index part file into bytes")?;
|
||||
let index_part_size = index_part_bytes.len();
|
||||
let index_part_bytes = tokio::io::BufReader::new(std::io::Cursor::new(index_part_bytes));
|
||||
|
||||
let index_part_path = conf
|
||||
.metadata_path(timeline_id, tenant_id)
|
||||
.with_file_name(IndexPart::FILE_NAME);
|
||||
storage
|
||||
.upload_storage_object(
|
||||
Box::new(index_part_bytes),
|
||||
index_part_size,
|
||||
&index_part_path,
|
||||
)
|
||||
.await
|
||||
.with_context(|| format!("Failed to upload index part for '{tenant_id} / {timeline_id}'"))
|
||||
}
|
||||
|
||||
/// Attempts to upload given layer files.
|
||||
/// No extra checks for overlapping files are made; any files that are already present remotely will be overwritten if submitted during the upload.
|
||||
///
|
||||
/// On an error, bumps the retries count and reschedules the entire task.
|
||||
pub(super) async fn upload_timeline_layer(
|
||||
storage: &GenericRemoteStorage,
|
||||
source_path: &Path,
|
||||
known_metadata: &LayerFileMetadata,
|
||||
) -> anyhow::Result<()> {
|
||||
fail_point!("before-upload-layer", |_| {
|
||||
bail!("failpoint before-upload-layer")
|
||||
});
|
||||
let storage_path = storage.remote_object_id(source_path).with_context(|| {
|
||||
format!(
|
||||
"Failed to get the layer storage path for local path '{}'",
|
||||
source_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
let source_file = fs::File::open(&source_path).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to open a source file for layer '{}'",
|
||||
source_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
let fs_size = source_file
|
||||
.metadata()
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to get the source file metadata for layer '{}'",
|
||||
source_path.display()
|
||||
)
|
||||
})?
|
||||
.len();
|
||||
|
||||
// FIXME: this looks bad
|
||||
if let Some(metadata_size) = known_metadata.file_size() {
|
||||
if metadata_size != fs_size {
|
||||
bail!("File {source_path:?} has its current FS size {fs_size} different from the initially determined {metadata_size}");
|
||||
}
|
||||
} else {
|
||||
// this is a silly state we would like to avoid
|
||||
}
|
||||
|
||||
let fs_size = usize::try_from(fs_size).with_context(|| {
|
||||
format!("File {source_path:?} size {fs_size} could not be converted to usize")
|
||||
})?;
|
||||
|
||||
storage
|
||||
.upload(Box::new(source_file), fs_size, &storage_path, None)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to upload a layer from local path '{}'",
|
||||
source_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
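The file-size check in upload_timeline_layer only passes when the caller captured the size into LayerFileMetadata at scheduling time. A hedged sketch of that caller-side step, using std::fs and the LayerFileMetadata constructor imported above; the surrounding upload-queue machinery is omitted and the helper name is made up:

use std::path::Path;
use crate::storage_sync::LayerFileMetadata;

// Record the on-disk size when the upload is scheduled, so that
// upload_timeline_layer can later verify the file has not changed.
fn metadata_for_new_layer(layer_path: &Path) -> anyhow::Result<LayerFileMetadata> {
    let file_size = std::fs::metadata(layer_path)?.len();
    Ok(LayerFileMetadata::new(file_size))
}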
|
||||
@@ -197,8 +197,8 @@ pub enum TaskKind {
|
||||
// Task that flushes frozen in-memory layers to disk
|
||||
LayerFlushTask,
|
||||
|
||||
// Task that manages the remote upload queue
|
||||
StorageSync,
|
||||
// Task that uploads a file to remote storage
|
||||
RemoteUploadTask,
|
||||
|
||||
// task that handles the initial downloading of all tenants
|
||||
InitialLoad,
|
||||
|
||||
(File diff suppressed because it is too large)
pageserver/src/tenant/bst_layer_map.rs (new file, 407 lines)
@@ -0,0 +1,407 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::ops::Range;
|
||||
|
||||
// TODO drop rpds. So far `im` looks 30% faster.
|
||||
use rpds::RedBlackTreeMapSync;
|
||||
use im::OrdMap;
|
||||
|
||||
/// Layer map implemented using persistent/immutable binary search tree.
|
||||
/// It supports historical queries, but no retroactive inserts. For that
|
||||
/// see RetroactiveLayerMap.
|
||||
///
|
||||
/// Layer type is abstracted as Value to make unit testing easier.
|
||||
pub struct PersistentLayerMap<Value> {
|
||||
/// Mapping key to the latest layer (if any) until the next key.
|
||||
/// We use the Sync version of the map because we want Self to
|
||||
/// be Sync.
|
||||
///
|
||||
/// TODO Separate Head into its own struct LatestLayerMap
|
||||
/// TODO Merge historic with retroactive, into HistoricLayerMap
|
||||
/// TODO Maintain a pair of heads, one for images, one for deltas.
|
||||
/// This way we can query both of them with one BTreeMap query.
|
||||
head: OrdMap<i128, Option<(u64, Value)>>,
|
||||
|
||||
/// All previous states of `self.head`
|
||||
///
|
||||
/// TODO: Sorted Vec + binary search could be slightly faster.
|
||||
historic: BTreeMap<u64, OrdMap<i128, Option<(u64, Value)>>>,
|
||||
}
|
||||
|
||||
impl<Value: std::fmt::Debug> std::fmt::Debug for PersistentLayerMap<Value> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let head_vec: Vec<_> = self.head.iter().collect();
|
||||
write!(f, "PersistentLayerMap: head: {:?}", head_vec)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone> Default for PersistentLayerMap<T> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl<Value: Clone> PersistentLayerMap<Value> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
head: OrdMap::default(),
|
||||
historic: BTreeMap::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function to subdivide the key range without changing any values
|
||||
fn add_node(self: &mut Self, key: i128) {
|
||||
let value = match self.head.range(0..=key).last() {
|
||||
Some((_, Some(v))) => Some(v.clone()),
|
||||
Some((_, None)) => None,
|
||||
None => None,
|
||||
};
|
||||
self.head.insert(key, value);
|
||||
}
|
||||
|
||||
pub fn insert(self: &mut Self, key: Range<i128>, lsn: Range<u64>, value: Value) {
|
||||
// It's only a persistent map, not a retroactive one
|
||||
if let Some(last_entry) = self.historic.iter().rev().next() {
|
||||
let last_lsn = last_entry.0;
|
||||
if lsn.start == *last_lsn {
|
||||
// TODO there are edge cases to take care of
|
||||
}
|
||||
if lsn.start < *last_lsn {
|
||||
panic!("unexpected retroactive insert");
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE The order of the following lines is important!!
|
||||
|
||||
// Add nodes at endpoints
|
||||
self.add_node(key.start);
|
||||
self.add_node(key.end);
|
||||
|
||||
// Raise the height where necessary
|
||||
//
|
||||
// NOTE This loop is worst case O(N), but amortized O(log N) in the special
|
||||
// case when rectangles have no height. In practice I don't think we'll see
|
||||
// the kind of layer intersections needed to trigger O(N) behavior. If we
|
||||
// do, it can be fixed using lazy propagation.
|
||||
let mut to_update = Vec::new();
|
||||
let mut to_remove = Vec::new();
|
||||
let mut prev_covered = false;
|
||||
for (k, node) in self.head.range(key.clone()) {
|
||||
let needs_cover = match node {
|
||||
None => true,
|
||||
Some((h, _)) => h < &lsn.end,
|
||||
};
|
||||
if needs_cover {
|
||||
match prev_covered {
|
||||
true => to_remove.push(k.clone()),
|
||||
false => to_update.push(k.clone()),
|
||||
}
|
||||
}
|
||||
prev_covered = needs_cover;
|
||||
}
|
||||
if !prev_covered {
|
||||
to_remove.push(key.end);
|
||||
}
|
||||
for k in to_update {
|
||||
self.head
|
||||
.insert(k.clone(), Some((lsn.end.clone(), value.clone())));
|
||||
}
|
||||
for k in to_remove {
|
||||
self.head.remove(&k);
|
||||
}
|
||||
|
||||
// Remember history. Clone is O(1)
|
||||
self.historic.insert(lsn.start, self.head.clone());
|
||||
}
|
||||
|
||||
pub fn query(self: &Self, key: i128, lsn: u64) -> Option<Value> {
|
||||
let version = self.historic.range(0..=lsn).rev().next()?.1;
|
||||
version
|
||||
.get_prev(&key)?
|
||||
// .range(0..=key).rev().next()?
|
||||
// NOTE The canonical way to do this in other crates is
|
||||
// `.range(0..=key).rev.next()` and `im` supports this
|
||||
// API but it's 2x slower than `.get_prev(&key)`.
|
||||
.1
|
||||
.as_ref()
|
||||
.map(|(_, v)| v.clone())
|
||||
}
|
||||
|
||||
pub fn trim(self: &mut Self, begin: &u64) {
|
||||
self.historic.split_off(begin);
|
||||
self.head = self
|
||||
.historic
|
||||
.iter()
|
||||
.rev()
|
||||
.next()
|
||||
.map(|(_, v)| v.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
}
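The query path above has two steps: pick the newest snapshot of head at or below the requested LSN, then find the closest key at or below the requested key inside that snapshot. The same idea can be written with plain BTreeMaps, which may make the structure easier to follow; this is an illustrative sketch, not the implementation, and it copies whole snapshots where the real code relies on a cheap persistent clone.

use std::collections::BTreeMap;

// Illustrative only: one fully-copied snapshot of `head` per insert LSN.
// `Value` stands in for the layer type, as in the real map.
struct NaiveLayerMap<Value> {
    // lsn.start -> snapshot of head; head maps key -> Option<(lsn_end, layer)>.
    historic: BTreeMap<u64, BTreeMap<i128, Option<(u64, Value)>>>,
}

impl<Value: Clone> NaiveLayerMap<Value> {
    fn query(&self, key: i128, lsn: u64) -> Option<Value> {
        // Step 1: newest snapshot whose start LSN is <= the requested LSN.
        let (_, snapshot) = self.historic.range(..=lsn).next_back()?;
        // Step 2: the entry covering `key` is the greatest key <= `key`.
        let (_, slot) = snapshot.range(..=key).next_back()?;
        slot.as_ref().map(|(_lsn_end, value)| value.clone())
    }
}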
|
||||
|
||||
/// Basic test for the immutable bst library, just to show usage.
|
||||
#[test]
|
||||
fn test_immutable_bst_dependency() {
|
||||
let map = RedBlackTreeMapSync::<i32, i32>::default();
|
||||
|
||||
let mut v1 = map.clone();
|
||||
let v2 = map.insert(1, 5);
|
||||
|
||||
// We can query current and past versions of key 1
|
||||
assert_eq!(v1.get(&1), None);
|
||||
assert_eq!(v2.get(&1), Some(&5));
|
||||
|
||||
// We can mutate old state, but it creates a branch.
|
||||
// It doesn't retroactively change future versions.
|
||||
v1.insert_mut(2, 6);
|
||||
assert_eq!(v1.get(&2), Some(&6));
|
||||
assert_eq!(v2.get(&2), None);
|
||||
}
|
||||
|
||||
/// This is the most basic test that demonstrates intended usage.
|
||||
/// All layers in this test have height 1.
|
||||
#[test]
|
||||
fn test_persistent_simple() {
|
||||
let mut map = PersistentLayerMap::<String>::new();
|
||||
map.insert(0..5, 100..101, "Layer 1".to_string());
|
||||
map.insert(3..9, 110..111, "Layer 2".to_string());
|
||||
map.insert(5..6, 120..121, "Layer 3".to_string());
|
||||
|
||||
// After Layer 1 insertion
|
||||
assert_eq!(map.query(1, 105), Some("Layer 1".to_string()));
|
||||
assert_eq!(map.query(4, 105), Some("Layer 1".to_string()));
|
||||
|
||||
// After Layer 2 insertion
|
||||
assert_eq!(map.query(4, 115), Some("Layer 2".to_string()));
|
||||
assert_eq!(map.query(8, 115), Some("Layer 2".to_string()));
|
||||
assert_eq!(map.query(11, 115), None);
|
||||
|
||||
// After Layer 3 insertion
|
||||
assert_eq!(map.query(4, 125), Some("Layer 2".to_string()));
|
||||
assert_eq!(map.query(5, 125), Some("Layer 3".to_string()));
|
||||
assert_eq!(map.query(7, 125), Some("Layer 2".to_string()));
|
||||
}
|
||||
|
||||
/// Cover simple off-by-one edge cases
|
||||
#[test]
|
||||
fn test_off_by_one() {
|
||||
let mut map = PersistentLayerMap::<String>::new();
|
||||
map.insert(3..5, 100..110, "Layer 1".to_string());
|
||||
|
||||
// Check different LSNs
|
||||
assert_eq!(map.query(4, 99), None);
|
||||
assert_eq!(map.query(4, 100), Some("Layer 1".to_string()));
|
||||
|
||||
// Check different keys
|
||||
assert_eq!(map.query(2, 105), None);
|
||||
assert_eq!(map.query(3, 105), Some("Layer 1".to_string()));
|
||||
assert_eq!(map.query(4, 105), Some("Layer 1".to_string()));
|
||||
assert_eq!(map.query(5, 105), None);
|
||||
}
|
||||
|
||||
/// Cover edge cases where layers begin or end on the same key
|
||||
#[test]
|
||||
fn test_key_collision() {
|
||||
let mut map = PersistentLayerMap::<String>::new();
|
||||
|
||||
map.insert(3..5, 100..110, "Layer 10".to_string());
|
||||
map.insert(5..8, 100..110, "Layer 11".to_string());
|
||||
|
||||
map.insert(3..4, 200..210, "Layer 20".to_string());
|
||||
|
||||
// Check after layer 11
|
||||
assert_eq!(map.query(2, 105), None);
|
||||
assert_eq!(map.query(3, 105), Some("Layer 10".to_string()));
|
||||
assert_eq!(map.query(5, 105), Some("Layer 11".to_string()));
|
||||
assert_eq!(map.query(7, 105), Some("Layer 11".to_string()));
|
||||
assert_eq!(map.query(8, 105), None);
|
||||
|
||||
// Check after layer 20
|
||||
assert_eq!(map.query(2, 205), None);
|
||||
assert_eq!(map.query(3, 205), Some("Layer 20".to_string()));
|
||||
assert_eq!(map.query(5, 205), Some("Layer 11".to_string()));
|
||||
assert_eq!(map.query(7, 205), Some("Layer 11".to_string()));
|
||||
assert_eq!(map.query(8, 205), None);
|
||||
}
|
||||
|
||||
/// Test when rectangles have nontrivial height and possibly overlap
|
||||
#[test]
|
||||
fn test_persistent_overlapping() {
|
||||
let mut map = PersistentLayerMap::<String>::new();
|
||||
|
||||
// Add 3 key-disjoint layers with varying LSN ranges
|
||||
map.insert(1..2, 100..200, "Layer 1".to_string());
|
||||
map.insert(4..5, 110..200, "Layer 2".to_string());
|
||||
map.insert(7..8, 120..300, "Layer 3".to_string());
|
||||
|
||||
// Add wide and short layer
|
||||
map.insert(0..9, 130..199, "Layer 4".to_string());
|
||||
|
||||
// Add wide layer taller than some
|
||||
map.insert(0..9, 140..201, "Layer 5".to_string());
|
||||
|
||||
// Add wide layer taller than all
|
||||
map.insert(0..9, 150..301, "Layer 6".to_string());
|
||||
|
||||
// After layer 4 insertion
|
||||
assert_eq!(map.query(0, 135), Some("Layer 4".to_string()));
|
||||
assert_eq!(map.query(1, 135), Some("Layer 1".to_string()));
|
||||
assert_eq!(map.query(2, 135), Some("Layer 4".to_string()));
|
||||
assert_eq!(map.query(4, 135), Some("Layer 2".to_string()));
|
||||
assert_eq!(map.query(5, 135), Some("Layer 4".to_string()));
|
||||
assert_eq!(map.query(7, 135), Some("Layer 3".to_string()));
|
||||
assert_eq!(map.query(8, 135), Some("Layer 4".to_string()));
|
||||
|
||||
// After layer 5 insertion
|
||||
assert_eq!(map.query(0, 145), Some("Layer 5".to_string()));
|
||||
assert_eq!(map.query(1, 145), Some("Layer 5".to_string()));
|
||||
assert_eq!(map.query(2, 145), Some("Layer 5".to_string()));
|
||||
assert_eq!(map.query(4, 145), Some("Layer 5".to_string()));
|
||||
assert_eq!(map.query(5, 145), Some("Layer 5".to_string()));
|
||||
assert_eq!(map.query(7, 145), Some("Layer 3".to_string()));
|
||||
assert_eq!(map.query(8, 145), Some("Layer 5".to_string()));
|
||||
|
||||
// After layer 6 insertion
|
||||
assert_eq!(map.query(0, 155), Some("Layer 6".to_string()));
|
||||
assert_eq!(map.query(1, 155), Some("Layer 6".to_string()));
|
||||
assert_eq!(map.query(2, 155), Some("Layer 6".to_string()));
|
||||
assert_eq!(map.query(4, 155), Some("Layer 6".to_string()));
|
||||
assert_eq!(map.query(5, 155), Some("Layer 6".to_string()));
|
||||
assert_eq!(map.query(7, 155), Some("Layer 6".to_string()));
|
||||
assert_eq!(map.query(8, 155), Some("Layer 6".to_string()));
|
||||
}
|
||||
|
||||
/// Layer map that supports:
|
||||
/// - efficient historical queries
|
||||
/// - efficient append only updates
|
||||
/// - tombstones and similar methods for non-latest updates
|
||||
/// - compaction/rebuilding to remove tombstones
|
||||
///
|
||||
/// See this for better retroactive techniques we can try
|
||||
/// https://www.youtube.com/watch?v=WqCWghETNDc&t=581s
|
||||
///
|
||||
/// Layer type is abstracted as Value to make unit testing easier.
|
||||
pub struct RetroactiveLayerMap<Value> {
|
||||
/// A persistent layer map that we rebuild when we need to retroactively update
|
||||
map: PersistentLayerMap<Value>,
|
||||
|
||||
/// We buffer insertion into the PersistentLayerMap to decrease the number of rebuilds.
|
||||
/// A value of None means we want to delete this item.
|
||||
buffer: BTreeMap<(u64, u64, i128, i128), Option<Value>>,
|
||||
|
||||
/// All current layers. This is not used for search. Only to make rebuilds easier.
|
||||
layers: BTreeMap<(u64, u64, i128, i128), Value>,
|
||||
}
|
||||
|
||||
impl<Value: std::fmt::Debug> std::fmt::Debug for RetroactiveLayerMap<Value> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "RetroactiveLayerMap: head: {:?}", self.map)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone> Default for RetroactiveLayerMap<T> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl<Value: Clone> RetroactiveLayerMap<Value> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
map: PersistentLayerMap::<Value>::new(),
|
||||
buffer: BTreeMap::new(),
|
||||
layers: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn insert(self: &mut Self, key: Range<i128>, lsn: Range<u64>, value: Value) {
|
||||
self.buffer.insert(
|
||||
(lsn.start, lsn.end, key.start, key.end),
|
||||
Some(value.clone()),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn remove(self: &mut Self, key: Range<i128>, lsn: Range<u64>) {
|
||||
self.buffer
|
||||
.insert((lsn.start, lsn.end, key.start, key.end), None);
|
||||
}
|
||||
|
||||
pub fn rebuild(self: &mut Self) {
|
||||
// Find the first LSN that needs to be rebuilt
|
||||
let rebuild_since: u64 = match self.buffer.iter().next() {
|
||||
Some(((lsn_start, _, _, _), _)) => lsn_start.clone(),
|
||||
None => return, // No need to rebuild if buffer is empty
|
||||
};
|
||||
|
||||
// Apply buffered updates to self.layers
|
||||
self.buffer.retain(|rect, layer| {
|
||||
match layer {
|
||||
Some(l) => {
|
||||
let existing = self.layers.insert(rect.clone(), l.clone());
|
||||
if existing.is_some() {
|
||||
panic!("can't overwrite layer");
|
||||
}
|
||||
}
|
||||
None => {
|
||||
let existing = self.layers.remove(rect);
|
||||
if existing.is_none() {
|
||||
panic!("invalid layer deletion");
|
||||
}
|
||||
}
|
||||
};
|
||||
false
|
||||
});
|
||||
|
||||
// Rebuild
|
||||
self.map.trim(&rebuild_since);
|
||||
for ((lsn_start, lsn_end, key_start, key_end), layer) in
|
||||
self.layers.range((rebuild_since, 0, 0, 0)..)
|
||||
{
|
||||
self.map
|
||||
.insert(*key_start..*key_end, *lsn_start..*lsn_end, layer.clone());
|
||||
}
|
||||
}
|
||||
|
||||
pub fn clear(self: &mut Self) {
|
||||
self.map.trim(&0);
|
||||
}
|
||||
|
||||
pub fn query(self: &Self, key: i128, lsn: u64) -> Option<Value> {
|
||||
if !self.buffer.is_empty() {
|
||||
panic!("rebuild pls")
|
||||
}
|
||||
|
||||
self.map.query(key, lsn)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_retroactive_simple() {
|
||||
let mut map = RetroactiveLayerMap::new();
|
||||
|
||||
// Append some images in increasing LSN order
|
||||
map.insert(0..5, 100..101, "Image 1".to_string());
|
||||
map.insert(3..9, 110..111, "Image 2".to_string());
|
||||
map.insert(4..6, 120..121, "Image 3".to_string());
|
||||
map.insert(8..9, 120..121, "Image 4".to_string());
|
||||
|
||||
// Add a delta layer out of order
|
||||
map.insert(2..5, 105..106, "Delta 1".to_string());
|
||||
|
||||
// Rebuild so we can start querying
|
||||
map.rebuild();
|
||||
|
||||
// Query key 4
|
||||
assert_eq!(map.query(4, 90), None);
|
||||
assert_eq!(map.query(4, 102), Some("Image 1".to_string()));
|
||||
assert_eq!(map.query(4, 107), Some("Delta 1".to_string()));
|
||||
assert_eq!(map.query(4, 115), Some("Image 2".to_string()));
|
||||
assert_eq!(map.query(4, 125), Some("Image 3".to_string()));
|
||||
|
||||
// Remove Image 3
|
||||
map.remove(4..6, 120..121);
|
||||
map.rebuild();
|
||||
|
||||
// Check deletion worked
|
||||
assert_eq!(map.query(4, 125), Some("Image 2".to_string()));
|
||||
assert_eq!(map.query(8, 125), Some("Image 4".to_string()));
|
||||
}
|
||||
@@ -28,6 +28,8 @@ use std::sync::Arc;
|
||||
use tracing::*;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
use super::bst_layer_map::RetroactiveLayerMap;
|
||||
|
||||
///
|
||||
/// LayerMap tracks what layers exist on a timeline.
|
||||
///
|
||||
@@ -55,6 +57,11 @@ pub struct LayerMap {
|
||||
/// All the historic layers are kept here
|
||||
historic_layers: RTree<LayerRTreeObject>,
|
||||
|
||||
/// HACK I'm experimenting with a new index to replace the RTree. If this
|
||||
/// works out I'll clean up the struct later.
|
||||
index: RetroactiveLayerMap<Arc<dyn Layer>>,
|
||||
images: RetroactiveLayerMap<Arc<dyn Layer>>,
|
||||
|
||||
/// L0 layers have key range Key::MIN..Key::MAX, and locating them using R-Tree search is very inefficient.
|
||||
/// So L0 layers are held in l0_delta_layers vector, in addition to the R-tree.
|
||||
l0_delta_layers: Vec<Arc<dyn Layer>>,
|
||||
@@ -241,6 +248,65 @@ impl LayerMap {
|
||||
/// layer.
|
||||
///
|
||||
pub fn search(&self, key: Key, end_lsn: Lsn) -> Result<Option<SearchResult>> {
|
||||
let old = self.search_old(key, end_lsn)?;
|
||||
let new = self.search_new(key, end_lsn)?;
|
||||
match (&old, &new) {
|
||||
(None, None) => {}
|
||||
(None, Some(_)) => panic!("returned Some, expected None"),
|
||||
(Some(_), None) => panic!("returned None, expected Some"),
|
||||
(Some(old), Some(new)) => {
|
||||
// TODO be more verbose and flexible
|
||||
let context = format!("query: key {}, end_lsn: {}", key, end_lsn);
|
||||
assert_eq!(old.layer.filename(), new.layer.filename(), "{}", context);
|
||||
assert_eq!(old.lsn_floor, new.lsn_floor, "{}", context);
|
||||
}
|
||||
}
|
||||
return Ok(new);
|
||||
}
|
||||
|
||||
// HACK just testing correctness
|
||||
fn search_new(&self, key: Key, end_lsn: Lsn) -> Result<Option<SearchResult>> {
|
||||
// TODO I'm making two separate queries, which is 2x the cost, but that
|
||||
// can be avoided in various ways. Caching latest_image queries is
|
||||
// probably the simplest, but combining the two data structures
|
||||
// might be better.
|
||||
let latest_layer = self.index.query(key.to_i128(), end_lsn.0 - 1);
|
||||
let latest_image = self.images.query(key.to_i128(), end_lsn.0 - 1);
|
||||
|
||||
// Check for exact match
|
||||
let latest_image = if let Some(image) = latest_image {
|
||||
let img_lsn = image.get_lsn_range().start;
|
||||
if Lsn(img_lsn.0 + 1) == end_lsn {
|
||||
return Ok(Some(SearchResult {
|
||||
layer: image,
|
||||
lsn_floor: img_lsn,
|
||||
}));
|
||||
}
|
||||
|
||||
// HACK just to give back ownership of latest_image to parent scope.
|
||||
// There's definitely a cleaner way to do it.
|
||||
Some(image)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
return Ok(latest_layer.map(|layer| {
|
||||
// Compute lsn_floor
|
||||
let mut lsn_floor = layer.get_lsn_range().start;
|
||||
if let Some(image) = latest_image {
|
||||
if layer.is_incremental() {
|
||||
lsn_floor = std::cmp::max(lsn_floor, image.get_lsn_range().start + 1)
|
||||
}
|
||||
}
|
||||
SearchResult {
|
||||
layer,
|
||||
lsn_floor,
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
// HACK just testing correctness
|
||||
fn search_old(&self, key: Key, end_lsn: Lsn) -> Result<Option<SearchResult>> {
|
||||
// linear search
|
||||
// Find the latest image layer that covers the given key
|
||||
let mut latest_img: Option<Arc<dyn Layer>> = None;
|
||||
@@ -345,19 +411,50 @@ impl LayerMap {
|
||||
/// Insert an on-disk layer
|
||||
///
|
||||
pub fn insert_historic(&mut self, layer: Arc<dyn Layer>) {
|
||||
let kr = layer.get_key_range();
|
||||
let lr = layer.get_lsn_range();
|
||||
self.index.insert(
|
||||
kr.start.to_i128()..kr.end.to_i128(),
|
||||
lr.start.0..lr.end.0,
|
||||
Arc::clone(&layer),
|
||||
);
|
||||
if !layer.is_incremental() {
|
||||
self.images.insert(
|
||||
kr.start.to_i128()..kr.end.to_i128(),
|
||||
lr.start.0..lr.end.0,
|
||||
Arc::clone(&layer),
|
||||
);
|
||||
}
|
||||
|
||||
if layer.get_key_range() == (Key::MIN..Key::MAX) {
|
||||
self.l0_delta_layers.push(layer.clone());
|
||||
}
|
||||
// TODO remove this so insert isn't slow. I need it for now for iter_historic()
|
||||
self.historic_layers.insert(LayerRTreeObject::new(layer));
|
||||
NUM_ONDISK_LAYERS.inc();
|
||||
}
|
||||
|
||||
/// Must be called after a batch of insert_historic calls, before querying
|
||||
pub fn rebuild_index(&mut self) {
|
||||
self.index.rebuild();
|
||||
self.images.rebuild();
|
||||
}
|
||||
|
||||
///
|
||||
/// Remove an on-disk layer from the map.
|
||||
///
|
||||
/// This should be called when the corresponding file on disk has been deleted.
|
||||
///
|
||||
pub fn remove_historic(&mut self, layer: Arc<dyn Layer>) {
|
||||
let kr = layer.get_key_range();
|
||||
let lr = layer.get_lsn_range();
|
||||
self.index
|
||||
.remove(kr.start.to_i128()..kr.end.to_i128(), lr.start.0..lr.end.0);
|
||||
if !layer.is_incremental() {
|
||||
self.images
|
||||
.remove(kr.start.to_i128()..kr.end.to_i128(), lr.start.0..lr.end.0);
|
||||
}
|
||||
|
||||
if layer.get_key_range() == (Key::MIN..Key::MAX) {
|
||||
let len_before = self.l0_delta_layers.len();
|
||||
|
||||
@@ -586,3 +683,5 @@ impl LayerMap {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// TODO add layer map tests
|
||||
|
||||
@@ -242,7 +242,8 @@ pub fn save_metadata(
|
||||
let mut file = VirtualFile::open_with_options(
|
||||
&path,
|
||||
OpenOptions::new().write(true).create_new(first_save),
|
||||
)?;
|
||||
)
|
||||
.context("open_with_options")?;
|
||||
|
||||
let metadata_bytes = data.to_bytes().context("Failed to get metadata bytes")?;
|
||||
|
||||
@@ -264,6 +265,26 @@ pub fn save_metadata(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_metadata(
|
||||
conf: &'static PageServerConf,
|
||||
timeline_id: TimelineId,
|
||||
tenant_id: TenantId,
|
||||
) -> anyhow::Result<TimelineMetadata> {
|
||||
let metadata_path = conf.metadata_path(timeline_id, tenant_id);
|
||||
let metadata_bytes = std::fs::read(&metadata_path).with_context(|| {
|
||||
format!(
|
||||
"Failed to read metadata bytes from path {}",
|
||||
metadata_path.display()
|
||||
)
|
||||
})?;
|
||||
TimelineMetadata::from_bytes(&metadata_bytes).with_context(|| {
|
||||
format!(
|
||||
"Failed to parse metadata bytes from path {}",
|
||||
metadata_path.display()
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -14,11 +14,13 @@ use std::cmp::{max, min, Ordering};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fs;
|
||||
use std::ops::{Deref, Range};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::atomic::{self, AtomicBool, AtomicI64, Ordering as AtomicOrdering};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::{AtomicBool, AtomicI64, Ordering as AtomicOrdering};
|
||||
use std::sync::{Arc, Mutex, MutexGuard, RwLock};
|
||||
use std::time::{Duration, Instant, SystemTime};
|
||||
|
||||
use crate::storage_sync::index::{IndexPart, RelativePath};
|
||||
use crate::storage_sync::RemoteTimelineClient;
|
||||
use crate::tenant::{
|
||||
delta_layer::{DeltaLayer, DeltaLayerWriter},
|
||||
ephemeral_file::is_ephemeral_file,
|
||||
@@ -31,7 +33,7 @@ use crate::tenant::{
|
||||
storage_layer::{Layer, ValueReconstructResult, ValueReconstructState},
|
||||
};
|
||||
|
||||
use crate::config::{PageServerConf, METADATA_FILE_NAME};
|
||||
use crate::config::PageServerConf;
|
||||
use crate::keyspace::{KeyPartitioning, KeySpace};
|
||||
use crate::metrics::TimelineMetrics;
|
||||
use crate::pgdatadir_mapping::BlockNumber;
|
||||
@@ -40,6 +42,7 @@ use crate::pgdatadir_mapping::{is_rel_fsm_block_key, is_rel_vm_block_key};
|
||||
use crate::tenant_config::TenantConfOpt;
|
||||
use pageserver_api::reltag::RelTag;
|
||||
|
||||
use postgres_connection::PgConnectionConfig;
|
||||
use postgres_ffi::to_pg_timestamp;
|
||||
use utils::{
|
||||
id::{TenantId, TimelineId},
|
||||
@@ -50,16 +53,14 @@ use utils::{
|
||||
|
||||
use crate::repository::GcResult;
|
||||
use crate::repository::{Key, Value};
|
||||
use crate::task_mgr;
|
||||
use crate::task_mgr::TaskKind;
|
||||
use crate::walreceiver::{is_etcd_client_initialized, spawn_connection_manager_task};
|
||||
use crate::walredo::WalRedoManager;
|
||||
use crate::CheckpointConfig;
|
||||
use crate::METADATA_FILE_NAME;
|
||||
use crate::ZERO_PAGE;
|
||||
use crate::{
|
||||
page_cache,
|
||||
storage_sync::{self, index::LayerFileMetadata},
|
||||
};
|
||||
use crate::{is_temporary, task_mgr};
|
||||
use crate::{page_cache, storage_sync::index::LayerFileMetadata};
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
enum FlushLoopState {
|
||||
@@ -86,6 +87,13 @@ pub struct Timeline {
|
||||
// WAL redo manager
|
||||
walredo_mgr: Arc<dyn WalRedoManager + Sync + Send>,
|
||||
|
||||
/// Remote storage client.
|
||||
///
|
||||
/// If Some, use it to upload all newly created layers to the remote storage,
|
||||
/// and keep remote metadata file in sync. In the future, also use it to download
|
||||
/// layer files on-demand.
|
||||
pub remote_client: Option<Arc<RemoteTimelineClient>>,
|
||||
|
||||
// What page versions do we hold in the repository? If we get a
|
||||
// request > last_record_lsn, we need to wait until we receive all
|
||||
// the WAL up to the request. The SeqWait provides functions for
|
||||
@@ -118,9 +126,6 @@ pub struct Timeline {
|
||||
// Metrics
|
||||
metrics: TimelineMetrics,
|
||||
|
||||
/// If `true`, will backup its files that appear after each checkpointing to the remote storage.
|
||||
upload_layers: AtomicBool,
|
||||
|
||||
/// Ensures layers aren't frozen by checkpointer between
|
||||
/// [`Timeline::get_layer_for_write`] and layer reads.
|
||||
/// Locked automatically by [`TimelineWriter`] and checkpointer.
|
||||
@@ -143,7 +148,7 @@ pub struct Timeline {
|
||||
/// A lock to ensure that no layer of the timeline is removed concurrently by other tasks.
|
||||
/// This lock is acquired in [`Timeline::gc`], [`Timeline::compact`],
|
||||
/// and [`Tenant::delete_timeline`].
|
||||
layer_removal_cs: Mutex<()>,
|
||||
pub(super) layer_removal_cs: tokio::sync::Mutex<()>,
|
||||
|
||||
// Needed to ensure that we can't create a branch at a point that was already garbage collected
|
||||
pub latest_gc_cutoff_lsn: Rcu<Lsn>,
|
||||
@@ -295,7 +300,7 @@ impl LogicalSize {
|
||||
}
|
||||
|
||||
pub struct WalReceiverInfo {
|
||||
pub wal_source_connstr: String,
|
||||
pub wal_source_connconf: PgConnectionConfig,
|
||||
pub last_received_msg_lsn: Lsn,
|
||||
pub last_received_msg_ts: u128,
|
||||
}
|
||||
@@ -407,6 +412,14 @@ impl Timeline {
|
||||
self.disk_consistent_lsn.load()
|
||||
}
|
||||
|
||||
pub fn get_remote_consistent_lsn(&self) -> Option<Lsn> {
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
remote_client.last_uploaded_consistent_lsn()
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the physical size of the timeline at the latest LSN
|
||||
pub fn get_physical_size(&self) -> u64 {
|
||||
self.metrics.current_physical_size_gauge.get()
|
||||
@@ -491,15 +504,15 @@ impl Timeline {
|
||||
CheckpointConfig::Forced => {
|
||||
self.freeze_inmem_layer(false);
|
||||
self.flush_frozen_layers_and_wait().await?;
|
||||
self.compact()
|
||||
self.compact().await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compact(&self) -> anyhow::Result<()> {
|
||||
pub async fn compact(&self) -> anyhow::Result<()> {
|
||||
let last_record_lsn = self.get_last_record_lsn();
|
||||
|
||||
// Last record Lsn could be zero in case the timelie was just created
|
||||
// Last record Lsn could be zero in case the timeline was just created
|
||||
if !last_record_lsn.is_valid() {
|
||||
warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
|
||||
return Ok(());
|
||||
@@ -539,7 +552,12 @@ impl Timeline {
|
||||
// Below are functions compact_level0() and create_image_layers()
|
||||
// but they are a bit ad hoc and don't quite work like it's explained
|
||||
// above. Rewrite it.
|
||||
let _layer_removal_cs = self.layer_removal_cs.lock().unwrap();
|
||||
let _layer_removal_cs = self.layer_removal_cs.lock().await;
|
||||
// Is the timeline being deleted?
|
||||
let state = *self.state.borrow();
|
||||
if state == TimelineState::Stopping {
|
||||
anyhow::bail!("timeline is Stopping");
|
||||
}
|
||||
|
||||
let target_file_size = self.get_checkpoint_distance();
|
||||
|
||||
@@ -553,20 +571,15 @@ impl Timeline {
|
||||
// 2. Create new image layers for partitions that have been modified
|
||||
// "enough".
|
||||
let layer_paths_to_upload = self.create_image_layers(&partitioning, lsn, false)?;
|
||||
if !layer_paths_to_upload.is_empty()
|
||||
&& self.upload_layers.load(atomic::Ordering::Relaxed)
|
||||
{
|
||||
storage_sync::schedule_layer_upload(
|
||||
self.tenant_id,
|
||||
self.timeline_id,
|
||||
layer_paths_to_upload,
|
||||
None,
|
||||
);
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
for (path, layer_metadata) in layer_paths_to_upload {
|
||||
remote_client.schedule_layer_file_upload(&path, &layer_metadata)?;
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Compact
|
||||
let timer = self.metrics.compact_time_histo.start_timer();
|
||||
self.compact_level0(target_file_size)?;
|
||||
self.compact_level0(target_file_size).await?;
|
||||
timer.stop_and_record();
|
||||
}
|
||||
Err(err) => {
|
||||
@@ -655,8 +668,8 @@ impl Timeline {
|
||||
(TimelineState::Broken, _) => {
|
||||
error!("Ignoring state update {new_state:?} for broken tenant");
|
||||
}
|
||||
(TimelineState::Paused, TimelineState::Active) => {
|
||||
debug!("Not activating a paused timeline");
|
||||
(TimelineState::Stopping, TimelineState::Active) => {
|
||||
debug!("Not activating a Stopping timeline");
|
||||
}
|
||||
(_, new_state) => {
|
||||
self.state.send_replace(new_state);
|
||||
@@ -726,7 +739,7 @@ impl Timeline {
|
||||
timeline_id: TimelineId,
|
||||
tenant_id: TenantId,
|
||||
walredo_mgr: Arc<dyn WalRedoManager + Send + Sync>,
|
||||
upload_layers: bool,
|
||||
remote_client: Option<RemoteTimelineClient>,
|
||||
pg_version: u32,
|
||||
) -> Self {
|
||||
let disk_consistent_lsn = metadata.disk_consistent_lsn();
|
||||
@@ -745,6 +758,8 @@ impl Timeline {
|
||||
|
||||
walredo_mgr,
|
||||
|
||||
remote_client: remote_client.map(Arc::new),
|
||||
|
||||
// initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
|
||||
last_record_lsn: SeqWait::new(RecordLsn {
|
||||
last: disk_consistent_lsn,
|
||||
@@ -760,15 +775,13 @@ impl Timeline {
|
||||
|
||||
metrics: TimelineMetrics::new(&tenant_id, &timeline_id),
|
||||
|
||||
upload_layers: AtomicBool::new(upload_layers),
|
||||
|
||||
flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
|
||||
|
||||
layer_flush_start_tx,
|
||||
layer_flush_done_tx,
|
||||
|
||||
write_lock: Mutex::new(()),
|
||||
layer_removal_cs: Mutex::new(()),
|
||||
layer_removal_cs: Default::default(),
|
||||
|
||||
gc_info: RwLock::new(GcInfo {
|
||||
retain_lsns: Vec::new(),
|
||||
@@ -874,6 +887,7 @@ impl Timeline {
|
||||
walreceiver_connect_timeout,
|
||||
lagging_wal_timeout,
|
||||
max_lsn_wal_lag,
|
||||
crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -895,6 +909,7 @@ impl Timeline {
|
||||
|
||||
for direntry in fs::read_dir(timeline_path)? {
|
||||
let direntry = direntry?;
|
||||
let direntry_path = direntry.path();
|
||||
let fname = direntry.file_name();
|
||||
let fname = fname.to_string_lossy();
|
||||
|
||||
@@ -906,7 +921,7 @@ impl Timeline {
|
||||
imgfilename, self.timeline_id, disk_consistent_lsn
|
||||
);
|
||||
|
||||
rename_to_backup(direntry.path())?;
|
||||
rename_to_backup(&direntry_path)?;
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -916,6 +931,7 @@ impl Timeline {
|
||||
trace!("found layer {}", layer.filename().display());
|
||||
total_physical_size += layer.path().metadata()?.len();
|
||||
layers.insert_historic(Arc::new(layer));
|
||||
layers.rebuild_index();
|
||||
num_layers += 1;
|
||||
} else if let Some(deltafilename) = DeltaFileName::parse_str(&fname) {
|
||||
// Create a DeltaLayer struct for each delta file.
|
||||
@@ -930,7 +946,7 @@ impl Timeline {
|
||||
deltafilename, self.timeline_id, disk_consistent_lsn
|
||||
);
|
||||
|
||||
rename_to_backup(direntry.path())?;
|
||||
rename_to_backup(&direntry_path)?;
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -940,13 +956,27 @@ impl Timeline {
|
||||
trace!("found layer {}", layer.filename().display());
|
||||
total_physical_size += layer.path().metadata()?.len();
|
||||
layers.insert_historic(Arc::new(layer));
|
||||
layers.rebuild_index();
|
||||
num_layers += 1;
|
||||
} else if fname == METADATA_FILE_NAME || fname.ends_with(".old") {
|
||||
// ignore these
|
||||
} else if crate::storage_sync::is_temp_download_file(&direntry_path) {
|
||||
info!(
|
||||
"skipping temp download file, reconcile_with_remote will resume / clean up: {}",
|
||||
fname
|
||||
);
|
||||
} else if is_ephemeral_file(&fname) {
|
||||
// Delete any old ephemeral files
|
||||
trace!("deleting old ephemeral file in timeline dir: {}", fname);
|
||||
fs::remove_file(direntry.path())?;
|
||||
fs::remove_file(&direntry_path)?;
|
||||
} else if is_temporary(&direntry_path) {
|
||||
info!("removing temp timeline file at {}", direntry_path.display());
|
||||
fs::remove_file(&direntry_path).with_context(|| {
|
||||
format!(
|
||||
"failed to remove temp download file at {}",
|
||||
direntry_path.display()
|
||||
)
|
||||
})?;
|
||||
} else {
|
||||
warn!("unrecognized filename in timeline dir: {}", fname);
|
||||
}
|
||||
@@ -967,10 +997,224 @@ impl Timeline {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(super) fn layer_removal_guard(&self) -> anyhow::Result<MutexGuard<()>> {
|
||||
self.layer_removal_cs
|
||||
.try_lock()
|
||||
.map_err(|e| anyhow!("cannot lock compaction critical section {e}"))
|
||||
async fn download_missing(
|
||||
&self,
|
||||
index_part: &IndexPart,
|
||||
remote_client: &RemoteTimelineClient,
|
||||
mut local_filenames: HashSet<PathBuf>,
|
||||
up_to_date_disk_consistent_lsn: Lsn,
|
||||
) -> anyhow::Result<HashSet<PathBuf>> {
|
||||
let mut remote_filenames: HashSet<PathBuf> = HashSet::new();
|
||||
for fname in index_part.timeline_layers.iter() {
|
||||
remote_filenames.insert(fname.to_local_path(&PathBuf::from("")));
|
||||
}
|
||||
|
||||
// Are there any local files that exist, with a size that doesn't match
|
||||
// with the size stored in the remote index file?
|
||||
// If so, rename_to_backup those files so that we re-download them later.
|
||||
local_filenames.retain(|path| {
|
||||
let layer_metadata = index_part
|
||||
.layer_metadata
|
||||
.get(&RelativePath::from_filename(path))
|
||||
.map(LayerFileMetadata::from)
|
||||
.unwrap_or(LayerFileMetadata::MISSING);
|
||||
|
||||
if let Some(remote_size) = layer_metadata.file_size() {
|
||||
let local_path = self.conf.timeline_path(&self.timeline_id, &self.tenant_id).join(&path);
|
||||
match local_path.metadata() {
|
||||
Ok(metadata) => {
|
||||
let local_size = metadata.len();
|
||||
|
||||
if local_size != remote_size {
|
||||
warn!("removing local file \"{}\" because it has unexpected length {}; length in remote index is {}",
|
||||
path.display(),
|
||||
local_size,
|
||||
remote_size);
|
||||
if let Err(err) = rename_to_backup(&local_path) {
|
||||
error!("could not rename file \"{}\": {:?}",
|
||||
local_path.display(), err);
|
||||
}
|
||||
self.metrics.current_physical_size_gauge.sub(local_size);
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
error!("could not get size of local file \"{}\": {:?}", path.display(), err);
|
||||
true
|
||||
}
|
||||
}
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
|
||||
// Are we missing some files that are present in remote storage?
|
||||
// Download them now.
|
||||
// TODO Downloading many files this way is not efficient.
|
||||
// Better to use FuturesUnordered. Maybe keep as is because:
|
||||
// a) inplace download is a throw-away code, on-demand patch doesnt need that
|
||||
// b) typical case now is that there is nothing to sync, this downloads a lot
|
||||
// 1) if there was another pageserver that came and generated new files
|
||||
// 2) during attach of a timeline with big history which we currently do not do
|
||||
for path in remote_filenames.difference(&local_filenames) {
|
||||
let fname = path.to_str().unwrap();
|
||||
info!("remote layer file {fname} does not exist locally");
|
||||
|
||||
let layer_metadata = index_part
|
||||
.layer_metadata
|
||||
.get(&RelativePath::from_filename(path))
|
||||
.map(LayerFileMetadata::from)
|
||||
.unwrap_or(LayerFileMetadata::MISSING);
|
||||
|
||||
if let Some(imgfilename) = ImageFileName::parse_str(fname) {
|
||||
if imgfilename.lsn > up_to_date_disk_consistent_lsn {
|
||||
warn!(
|
||||
"found future image layer {} on timeline {} remote_consistent_lsn is {}",
|
||||
imgfilename, self.timeline_id, up_to_date_disk_consistent_lsn
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
trace!("downloading image file: {}", file = path.display());
|
||||
let sz = remote_client
|
||||
.download_layer_file(&RelativePath::from_filename(path), &layer_metadata)
|
||||
.await
|
||||
.context("download image layer")?;
|
||||
trace!("done");
|
||||
|
||||
let image_layer =
|
||||
ImageLayer::new(self.conf, self.timeline_id, self.tenant_id, &imgfilename);
|
||||
|
||||
{
|
||||
let mut layers = self.layers.write().unwrap();
|
||||
layers.insert_historic(Arc::new(image_layer));
|
||||
layers.rebuild_index();
|
||||
}
|
||||
self.metrics.current_physical_size_gauge.add(sz);
|
||||
} else if let Some(deltafilename) = DeltaFileName::parse_str(fname) {
|
||||
// Create a DeltaLayer struct for each delta file.
|
||||
// The end-LSN is exclusive, while disk_consistent_lsn is
|
||||
// inclusive. For example, if disk_consistent_lsn is 100, it is
|
||||
// OK for a delta layer to have end LSN 101, but if the end LSN
|
||||
// is 102, then it might not have been fully flushed to disk
|
||||
// before crash.
|
||||
if deltafilename.lsn_range.end > up_to_date_disk_consistent_lsn + 1 {
|
||||
warn!(
|
||||
"found future delta layer {} on timeline {} remote_consistent_lsn is {}",
|
||||
deltafilename, self.timeline_id, up_to_date_disk_consistent_lsn
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
trace!("downloading image file: {}", file = path.display());
|
||||
let sz = remote_client
|
||||
.download_layer_file(&RelativePath::from_filename(path), &layer_metadata)
|
||||
.await
|
||||
.context("download delta layer")?;
|
||||
trace!("done");
|
||||
|
||||
let delta_layer =
|
||||
DeltaLayer::new(self.conf, self.timeline_id, self.tenant_id, &deltafilename);
|
||||
|
||||
{
|
||||
let mut layers = self.layers.write().unwrap();
|
||||
layers.insert_historic(Arc::new(delta_layer));
|
||||
layers.rebuild_index();
|
||||
}
|
||||
self.metrics.current_physical_size_gauge.add(sz);
|
||||
} else {
|
||||
bail!("unexpected layer filename in remote storage: {}", fname);
|
||||
}
|
||||
}
|
||||
|
||||
// now these are local only filenames
|
||||
let local_only_filenames = local_filenames
|
||||
.difference(&remote_filenames)
|
||||
.cloned()
|
||||
.collect();
|
||||
Ok(local_only_filenames)
|
||||
}
|
||||
|
||||
///
|
||||
/// This function will synchronize local data with what we have in remote storage.
|
||||
/// 1. It will download missing layer files.
|
||||
/// 2. It will update local metadata if remote one has greater `disk_consistent_lsn`.
|
||||
/// 3. It will upload files that are missing on the remote
|
||||
/// 4. It will update index file on the remote accordingly
|
||||
/// TODO may be a bit cleaner to do things based on populated remote client,
|
||||
/// and then do things based on its upload_queue.latest_files
|
||||
///
|
||||
/// This is used during tenant attach. The layer map must have been loaded
|
||||
/// with local filesystem contents already.
|
||||
///
|
||||
/// The caller should provide IndexPart if it exists on the remote storage. If it's None,
|
||||
/// we assume that it is missing on the remote storage, which means that we initialized
|
||||
/// a timeline and then restarted before successful upload was performed
|
||||
///
|
||||
#[instrument(skip(self, index_part, up_to_date_metadata))]
|
||||
pub async fn reconcile_with_remote(
|
||||
&self,
|
||||
up_to_date_metadata: &TimelineMetadata,
|
||||
index_part: Option<&IndexPart>,
|
||||
) -> anyhow::Result<()> {
|
||||
info!("starting");
|
||||
let remote_client = self
|
||||
.remote_client
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("cannot download without remote storage"))?;
|
||||
|
||||
let disk_consistent_lsn = up_to_date_metadata.disk_consistent_lsn();
|
||||
|
||||
// Build a map of local layers for quick lookups
|
||||
let mut local_filenames: HashSet<PathBuf> = HashSet::new();
|
||||
for layer in self.layers.read().unwrap().iter_historic_layers() {
|
||||
local_filenames.insert(layer.filename());
|
||||
}
|
||||
|
||||
let local_only_filenames = match index_part {
|
||||
Some(index_part) => {
|
||||
info!(
|
||||
"initializing upload queue from remote index with {} layer files",
|
||||
index_part.timeline_layers.len()
|
||||
);
|
||||
remote_client.init_upload_queue(index_part)?;
|
||||
let local_only_filenames = self
|
||||
.download_missing(
|
||||
index_part,
|
||||
remote_client,
|
||||
local_filenames,
|
||||
disk_consistent_lsn,
|
||||
)
|
||||
.await?;
|
||||
local_only_filenames
|
||||
}
|
||||
None => {
|
||||
info!("initializing upload queue as empty");
|
||||
remote_client.init_upload_queue_for_empty_remote(up_to_date_metadata)?;
|
||||
local_filenames
|
||||
}
|
||||
};
|
||||
|
||||
// Are there local files that don't exist remotely? Schedule uploads for them
|
||||
let timeline_path = self.conf.timeline_path(&self.timeline_id, &self.tenant_id);
|
||||
for fname in &local_only_filenames {
|
||||
let absolute = timeline_path.join(fname);
|
||||
let sz = absolute
|
||||
.metadata()
|
||||
.with_context(|| format!("failed to get file {} metadata", fname.display()))?
|
||||
.len();
|
||||
info!("scheduling {} for upload", fname.display());
|
||||
remote_client.schedule_layer_file_upload(&absolute, &LayerFileMetadata::new(sz))?;
|
||||
}
|
||||
if !local_only_filenames.is_empty() {
|
||||
remote_client.schedule_index_upload(up_to_date_metadata)?;
|
||||
}
|
||||
|
||||
info!("Done");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn try_spawn_size_init_task(self: &Arc<Self>, init_lsn: Lsn) {
|
||||
@@ -1011,7 +1255,7 @@ impl Timeline {
|
||||
match new_state {
|
||||
// we're running this job for active timelines only
|
||||
TimelineState::Active => continue,
|
||||
TimelineState::Broken | TimelineState::Paused | TimelineState::Suspended => return Some(new_state),
|
||||
TimelineState::Broken | TimelineState::Stopping | TimelineState::Suspended => return Some(new_state),
|
||||
}
|
||||
}
|
||||
Err(_sender_dropped_error) => return None,
|
||||
@@ -1472,7 +1716,8 @@ impl Timeline {
|
||||
// After crash, we will restart WAL streaming and processing from that point.
|
||||
if disk_consistent_lsn != old_disk_consistent_lsn {
|
||||
assert!(disk_consistent_lsn > old_disk_consistent_lsn);
|
||||
self.update_metadata_file(disk_consistent_lsn, layer_paths_to_upload)?;
|
||||
self.update_metadata_file(disk_consistent_lsn, layer_paths_to_upload)
|
||||
.context("update_metadata_file")?;
|
||||
// Also update the in-memory copy
|
||||
self.disk_consistent_lsn.store(disk_consistent_lsn);
|
||||
}
|
||||
@@ -1527,15 +1772,18 @@ impl Timeline {
|
||||
self.tenant_id,
|
||||
&metadata,
|
||||
false,
|
||||
)?;
|
||||
)
|
||||
.context("save_metadata")?;
|
||||
|
||||
if self.can_upload_layers() {
|
||||
storage_sync::schedule_layer_upload(
|
||||
self.tenant_id,
|
||||
self.timeline_id,
|
||||
layer_paths_to_upload,
|
||||
Some(metadata),
|
||||
);
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
for (path, layer_metadata) in layer_paths_to_upload {
|
||||
remote_client
|
||||
.schedule_layer_file_upload(&path, &layer_metadata)
|
||||
.context("schedule_layer_file_upload")?;
|
||||
}
|
||||
remote_client
|
||||
.schedule_index_upload(&metadata)
|
||||
.context("schedule_layer_file_upload")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -1567,6 +1815,7 @@ impl Timeline {
|
||||
{
|
||||
let mut layers = self.layers.write().unwrap();
|
||||
layers.insert_historic(Arc::new(new_delta));
|
||||
layers.rebuild_index();
|
||||
}
|
||||
|
||||
// update the timeline's physical size
|
||||
@@ -1726,24 +1975,31 @@ impl Timeline {
|
||||
self.metrics.current_physical_size_gauge.add(metadata.len());
|
||||
layers.insert_historic(Arc::new(l));
|
||||
}
|
||||
layers.rebuild_index();
|
||||
drop(layers);
|
||||
timer.stop_and_record();
|
||||
|
||||
Ok(layer_paths_to_upload)
|
||||
}
|
||||
}
|
||||
#[derive(Default)]
|
||||
struct CompactLevel0Phase1Result {
|
||||
new_layers: Vec<DeltaLayer>,
|
||||
deltas_to_compact: Vec<Arc<dyn Layer>>,
|
||||
}
|
||||
|
||||
///
|
||||
/// Collect a bunch of Level 0 layer files, and compact and reshuffle them as
|
||||
/// as Level 1 files.
|
||||
///
|
||||
fn compact_level0(&self, target_file_size: u64) -> anyhow::Result<()> {
|
||||
impl Timeline {
|
||||
async fn compact_level0_phase1(
|
||||
&self,
|
||||
target_file_size: u64,
|
||||
) -> anyhow::Result<CompactLevel0Phase1Result> {
|
||||
let layers = self.layers.read().unwrap();
|
||||
let mut level0_deltas = layers.get_level0_deltas()?;
|
||||
drop(layers);
|
||||
|
||||
// Only compact if enough layers have accumulated.
|
||||
if level0_deltas.is_empty() || level0_deltas.len() < self.get_compaction_threshold() {
|
||||
return Ok(());
|
||||
return Ok(Default::default());
|
||||
}
|
||||
|
||||
// Gather the files to compact in this iteration.
|
||||
@@ -1978,6 +2234,35 @@ impl Timeline {
|
||||
layer_paths.pop().unwrap();
|
||||
}
|
||||
|
||||
drop(all_keys_iter); // So that deltas_to_compact is no longer borrowed
|
||||
|
||||
Ok(CompactLevel0Phase1Result {
|
||||
new_layers,
|
||||
deltas_to_compact,
|
||||
})
|
||||
}
|
||||
|
||||
///
|
||||
/// Collect a bunch of Level 0 layer files, and compact and reshuffle them as
|
||||
/// as Level 1 files.
|
||||
///
|
||||
async fn compact_level0(&self, target_file_size: u64) -> anyhow::Result<()> {
|
||||
let CompactLevel0Phase1Result {
|
||||
new_layers,
|
||||
deltas_to_compact,
|
||||
} = self.compact_level0_phase1(target_file_size).await?;
|
||||
|
||||
// Before deleting any layers, we need to wait for their upload ops to finish.
|
||||
// See storage_sync module level comment on consistency.
|
||||
// Do it here because we don't want to hold self.layers.write() while waiting.
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
info!("waiting for upload ops to complete");
|
||||
remote_client
|
||||
.wait_completion()
|
||||
.await
|
||||
.context("wait for layer upload ops to complete")?;
|
||||
}
|
||||
|
||||
let mut layers = self.layers.write().unwrap();
|
||||
let mut new_layer_paths = HashMap::with_capacity(new_layers.len());
|
||||
for l in new_layers {
|
||||
@@ -1985,41 +2270,40 @@ impl Timeline {
|
||||
|
||||
let metadata = new_delta_path.metadata()?;
|
||||
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
remote_client.schedule_layer_file_upload(
|
||||
&new_delta_path,
|
||||
&LayerFileMetadata::new(metadata.len()),
|
||||
)?;
|
||||
}
|
||||
|
||||
// update the timeline's physical size
|
||||
self.metrics.current_physical_size_gauge.add(metadata.len());
|
||||
|
||||
new_layer_paths.insert(new_delta_path, LayerFileMetadata::new(metadata.len()));
|
||||
layers.insert_historic(Arc::new(l));
|
||||
layers.rebuild_index();
|
||||
}
|
||||
|
||||
// Now that we have reshuffled the data to set of new delta layers, we can
|
||||
// delete the old ones
|
||||
let mut layer_paths_do_delete = HashSet::with_capacity(deltas_to_compact.len());
|
||||
drop(all_keys_iter);
|
||||
let mut layer_paths_to_delete = Vec::with_capacity(deltas_to_compact.len());
|
||||
for l in deltas_to_compact {
|
||||
if let Some(path) = l.local_path() {
|
||||
self.metrics
|
||||
.current_physical_size_gauge
|
||||
.sub(path.metadata()?.len());
|
||||
layer_paths_do_delete.insert(path);
|
||||
layer_paths_to_delete.push(path);
|
||||
}
|
||||
l.delete()?;
|
||||
layers.remove_historic(l);
|
||||
}
|
||||
layers.rebuild_index();
|
||||
drop(layers);
|
||||
|
||||
if self.can_upload_layers() {
|
||||
storage_sync::schedule_layer_upload(
|
||||
self.tenant_id,
|
||||
self.timeline_id,
|
||||
new_layer_paths,
|
||||
None,
|
||||
);
|
||||
storage_sync::schedule_layer_delete(
|
||||
self.tenant_id,
|
||||
self.timeline_id,
|
||||
layer_paths_do_delete,
|
||||
);
|
||||
// Also schedule the deletions in remote storage
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
remote_client.schedule_layer_file_deletion(&layer_paths_to_delete)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -2111,25 +2395,43 @@ impl Timeline {
|
||||
/// within a layer file. We can only remove the whole file if it's fully
|
||||
/// obsolete.
|
||||
///
|
||||
pub(super) fn gc(&self) -> anyhow::Result<GcResult> {
|
||||
let mut result: GcResult = GcResult::default();
|
||||
let now = SystemTime::now();
|
||||
|
||||
pub(super) async fn gc(&self) -> anyhow::Result<GcResult> {
|
||||
fail_point!("before-timeline-gc");
|
||||
|
||||
let _layer_removal_cs = self.layer_removal_cs.lock().unwrap();
|
||||
let _layer_removal_cs = self.layer_removal_cs.lock().await;
|
||||
// Is the timeline being deleted?
|
||||
let state = *self.state.borrow();
|
||||
if state == TimelineState::Stopping {
|
||||
anyhow::bail!("timeline is Stopping");
|
||||
}
|
||||
|
||||
let gc_info = self.gc_info.read().unwrap();
|
||||
let (horizon_cutoff, pitr_cutoff, retain_lsns) = {
|
||||
let gc_info = self.gc_info.read().unwrap();
|
||||
|
||||
let horizon_cutoff = min(gc_info.horizon_cutoff, self.get_disk_consistent_lsn());
|
||||
let pitr_cutoff = gc_info.pitr_cutoff;
|
||||
let retain_lsns = &gc_info.retain_lsns;
|
||||
let horizon_cutoff = min(gc_info.horizon_cutoff, self.get_disk_consistent_lsn());
|
||||
let pitr_cutoff = gc_info.pitr_cutoff;
|
||||
let retain_lsns = gc_info.retain_lsns.clone();
|
||||
(horizon_cutoff, pitr_cutoff, retain_lsns)
|
||||
};
|
||||
|
||||
let new_gc_cutoff = Lsn::min(horizon_cutoff, pitr_cutoff);
|
||||
|
||||
let _enter =
|
||||
info_span!("gc_timeline", timeline = %self.timeline_id, cutoff = %new_gc_cutoff)
|
||||
.entered();
|
||||
self.gc_timeline(horizon_cutoff, pitr_cutoff, retain_lsns, new_gc_cutoff)
|
||||
.instrument(
|
||||
info_span!("gc_timeline", timeline = %self.timeline_id, cutoff = %new_gc_cutoff),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn gc_timeline(
|
||||
&self,
|
||||
horizon_cutoff: Lsn,
|
||||
pitr_cutoff: Lsn,
|
||||
retain_lsns: Vec<Lsn>,
|
||||
new_gc_cutoff: Lsn,
|
||||
) -> anyhow::Result<GcResult> {
|
||||
let now = SystemTime::now();
|
||||
let mut result: GcResult = GcResult::default();
|
||||
|
||||
// Nothing to GC. Return early.
|
||||
let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
|
||||
@@ -2163,6 +2465,17 @@ impl Timeline {
|
||||
|
||||
debug!("retain_lsns: {:?}", retain_lsns);
|
||||
|
||||
// Before deleting any layers, we need to wait for their upload ops to finish.
|
||||
// See storage_sync module level comment on consistency.
|
||||
// Do it here because we don't want to hold self.layers.write() while waiting.
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
info!("waiting for upload ops to complete");
|
||||
remote_client
|
||||
.wait_completion()
|
||||
.await
|
||||
.context("wait for layer upload ops to complete")?;
|
||||
}
|
||||
|
||||
let mut layers_to_remove = Vec::new();
|
||||
|
||||
// Scan all on-disk layers in the timeline.
|
||||
@@ -2214,7 +2527,7 @@ impl Timeline {
|
||||
// might be referenced by child branches forever.
|
||||
// We can track this in child timeline GC and delete parent layers when
|
||||
// they are no longer needed. This might be complicated with long inheritance chains.
|
||||
for retain_lsn in retain_lsns {
|
||||
for retain_lsn in &retain_lsns {
|
||||
// start_lsn is inclusive
|
||||
if &l.get_lsn_range().start <= retain_lsn {
|
||||
debug!(
|
||||
@@ -2270,18 +2583,19 @@ impl Timeline {
|
||||
// Actually delete the layers from disk and remove them from the map.
|
||||
// (couldn't do this in the loop above, because you cannot modify a collection
|
||||
// while iterating it. BTreeMap::retain() would be another option)
|
||||
let mut layer_paths_to_delete = HashSet::with_capacity(layers_to_remove.len());
|
||||
let mut layer_paths_to_delete = Vec::with_capacity(layers_to_remove.len());
|
||||
for doomed_layer in layers_to_remove {
|
||||
if let Some(path) = doomed_layer.local_path() {
|
||||
self.metrics
|
||||
.current_physical_size_gauge
|
||||
.sub(path.metadata()?.len());
|
||||
layer_paths_to_delete.insert(path);
|
||||
layer_paths_to_delete.push(path);
|
||||
}
|
||||
doomed_layer.delete()?;
|
||||
layers.remove_historic(doomed_layer);
|
||||
result.layers_removed += 1;
|
||||
}
|
||||
layers.rebuild_index();
|
||||
|
||||
info!(
|
||||
"GC completed removing {} layers, cutoff {}",
|
||||
@@ -2292,12 +2606,8 @@ impl Timeline {
|
||||
fail_point!("after-timeline-gc-removed-layers");
|
||||
}
|
||||
|
||||
if self.can_upload_layers() {
|
||||
storage_sync::schedule_layer_delete(
|
||||
self.tenant_id,
|
||||
self.timeline_id,
|
||||
layer_paths_to_delete,
|
||||
);
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
remote_client.schedule_layer_file_deletion(&layer_paths_to_delete)?;
|
||||
}
|
||||
|
||||
result.elapsed = now.elapsed()?;
|
||||
@@ -2378,11 +2688,6 @@ impl Timeline {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn can_upload_layers(&self) -> bool {
|
||||
self.upload_layers.load(atomic::Ordering::Relaxed)
|
||||
&& self.current_state() != TimelineState::Broken
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function for get_reconstruct_data() to add the path of layers traversed
|
||||
@@ -2460,12 +2765,12 @@ impl<'a> TimelineWriter<'a> {
|
||||
|
||||
/// Add a suffix to a layer file's name: .{num}.old
|
||||
/// Uses the first available num (starts at 0)
|
||||
fn rename_to_backup(path: PathBuf) -> anyhow::Result<()> {
|
||||
fn rename_to_backup(path: &Path) -> anyhow::Result<()> {
|
||||
let filename = path
|
||||
.file_name()
|
||||
.ok_or_else(|| anyhow!("Path {} don't have a file name", path.display()))?
|
||||
.to_string_lossy();
|
||||
let mut new_path = path.clone();
|
||||
let mut new_path = path.to_owned();
|
||||
|
||||
for i in 0u32.. {
|
||||
new_path.set_file_name(format!("{}.{}.old", filename, i));
|
||||
|
||||
@@ -51,6 +51,7 @@ pub struct TenantConf {
|
||||
// This parameter determines L1 layer file size.
|
||||
pub compaction_target_size: u64,
|
||||
// How often to check if there's compaction work to be done.
|
||||
// Duration::ZERO means automatic compaction is disabled.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub compaction_period: Duration,
|
||||
// Level0 delta layer threshold for compaction.
|
||||
@@ -61,6 +62,7 @@ pub struct TenantConf {
|
||||
// Page versions older than this are garbage collected away.
|
||||
pub gc_horizon: u64,
|
||||
// Interval at which garbage collection is triggered.
|
||||
// Duration::ZERO means automatic GC is disabled
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub gc_period: Duration,
|
||||
// Delta layer churn threshold to create L1 image layers.
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
//! This module acts as a switchboard to access different repositories managed by this
|
||||
//! page server.
|
||||
|
||||
use std::collections::{hash_map, HashMap};
|
||||
use std::collections::hash_map;
|
||||
use std::ffi::OsStr;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
@@ -12,19 +12,12 @@ use tracing::*;
|
||||
|
||||
use remote_storage::GenericRemoteStorage;
|
||||
|
||||
use crate::config::{PageServerConf, METADATA_FILE_NAME, TIMELINE_UNINIT_MARK_SUFFIX};
|
||||
use crate::http::models::TenantInfo;
|
||||
use crate::storage_sync::index::{LayerFileMetadata, RemoteIndex, RemoteTimelineIndex};
|
||||
use crate::storage_sync::{self, LocalTimelineInitStatus, SyncStartupData, TimelineLocalFiles};
|
||||
use crate::config::PageServerConf;
|
||||
use crate::task_mgr::{self, TaskKind};
|
||||
use crate::tenant::{
|
||||
ephemeral_file::is_ephemeral_file, metadata::TimelineMetadata, Tenant, TenantState,
|
||||
};
|
||||
use crate::tenant::{Tenant, TenantState};
|
||||
use crate::tenant_config::TenantConfOpt;
|
||||
use crate::walredo::PostgresRedoManager;
|
||||
use crate::TEMP_FILE_SUFFIX;
|
||||
|
||||
use utils::crashsafe::{self, path_with_suffix_extension};
|
||||
use utils::fs_ext::PathExt;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
|
||||
mod tenants_state {
|
||||
@@ -59,153 +52,112 @@ mod tenants_state {
|
||||
pub fn init_tenant_mgr(
|
||||
conf: &'static PageServerConf,
|
||||
remote_storage: Option<GenericRemoteStorage>,
|
||||
) -> anyhow::Result<RemoteIndex> {
|
||||
) -> anyhow::Result<()> {
|
||||
let _entered = info_span!("init_tenant_mgr").entered();
|
||||
|
||||
let local_tenant_files = local_tenant_timeline_files(conf)
|
||||
.context("Failed to collect local tenant timeline files")?;
|
||||
|
||||
let (remote_index, tenants_to_attach) = if let Some(storage) = remote_storage {
|
||||
let storage_config = conf
|
||||
.remote_storage_config
|
||||
.as_ref()
|
||||
.expect("remote storage without config");
|
||||
let mut broken_tenants = HashMap::new();
|
||||
let mut ready_tenants = HashMap::new();
|
||||
for (tenant_id, tenant_attach_data) in local_tenant_files.into_iter() {
|
||||
match tenant_attach_data {
|
||||
TenantAttachData::Ready(t) => {
|
||||
ready_tenants.insert(tenant_id, t);
|
||||
}
|
||||
TenantAttachData::Broken(e) => {
|
||||
broken_tenants.insert(tenant_id, TenantAttachData::Broken(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
let SyncStartupData {
|
||||
remote_index,
|
||||
local_timeline_init_statuses,
|
||||
} = storage_sync::spawn_storage_sync_task(
|
||||
conf,
|
||||
ready_tenants,
|
||||
storage,
|
||||
storage_config.max_concurrent_syncs,
|
||||
storage_config.max_sync_errors,
|
||||
)
|
||||
.context("Failed to spawn the storage sync thread")?;
|
||||
|
||||
let n = local_timeline_init_statuses.0.len();
|
||||
let mut synced_timelines = local_timeline_init_statuses.0.into_iter().fold(
|
||||
HashMap::<TenantId, TenantAttachData>::with_capacity(n),
|
||||
|mut new_values, (tenant_id, old_values)| {
|
||||
let new_timeline_values = new_values
|
||||
.entry(tenant_id)
|
||||
.or_insert_with(|| TenantAttachData::Ready(HashMap::new()));
|
||||
if let TenantAttachData::Ready(t) = new_timeline_values {
|
||||
for (timeline_id, old_value) in old_values {
|
||||
if let LocalTimelineInitStatus::LocallyComplete(metadata) = old_value {
|
||||
t.insert(timeline_id, TimelineLocalFiles::ready(metadata));
|
||||
// Scan local filesystem for attached tenants
|
||||
let mut number_of_tenants = 0;
|
||||
let tenants_dir = conf.tenants_path();
|
||||
for dir_entry in std::fs::read_dir(&tenants_dir)
|
||||
.with_context(|| format!("Failed to list tenants dir {}", tenants_dir.display()))?
|
||||
{
|
||||
match &dir_entry {
|
||||
Ok(dir_entry) => {
|
||||
let tenant_dir_path = dir_entry.path();
|
||||
if crate::is_temporary(&tenant_dir_path) {
|
||||
info!(
|
||||
"Found temporary tenant directory, removing: {}",
|
||||
tenant_dir_path.display()
|
||||
);
|
||||
if let Err(e) = std::fs::remove_dir_all(&tenant_dir_path) {
|
||||
error!(
|
||||
"Failed to remove temporary directory '{}': {:?}",
|
||||
tenant_dir_path.display(),
|
||||
e
|
||||
);
|
||||
}
|
||||
} else {
|
||||
match load_local_tenant(conf, &tenant_dir_path, remote_storage.clone()) {
|
||||
Ok(Some(tenant)) => {
|
||||
tenants_state::write_tenants().insert(tenant.tenant_id(), tenant);
|
||||
number_of_tenants += 1;
|
||||
}
|
||||
Ok(None) => {
|
||||
// This case happens if we crash during attach before creating the attach marker file
|
||||
if let Err(e) = std::fs::remove_dir(&tenant_dir_path) {
|
||||
error!(
|
||||
"Failed to remove empty tenant directory '{}': {e:#}",
|
||||
tenant_dir_path.display()
|
||||
)
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to collect tenant files from dir '{}' for entry {:?}, reason: {:#}",
|
||||
tenants_dir.display(),
|
||||
dir_entry,
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
new_values
|
||||
},
|
||||
);
|
||||
synced_timelines.extend(broken_tenants);
|
||||
|
||||
(remote_index, synced_timelines)
|
||||
} else {
|
||||
info!("No remote storage configured, skipping storage sync, considering all local timelines with correct metadata files enabled");
|
||||
(RemoteIndex::default(), local_tenant_files)
|
||||
};
|
||||
attach_local_tenants(conf, &remote_index, tenants_to_attach);
|
||||
|
||||
Ok(remote_index)
|
||||
}
|
||||
|
||||
/// Reads local files to load tenants and their timelines given into pageserver's memory.
|
||||
/// Ignores other timelines that might be present for tenant, but were not passed as a parameter.
|
||||
/// Attempts to load as many entites as possible: if a certain timeline fails during the load, the tenant is marked as "Broken",
|
||||
/// and the load continues.
|
||||
///
|
||||
/// For successful tenant attach, it first has to have a `timelines/` subdirectory and a tenant config file that's loaded into memory successfully.
|
||||
/// If either of the conditions fails, the tenant will be added to memory with [`TenantState::Broken`] state, otherwise we start to load its timelines.
|
||||
/// Alternatively, tenant is considered loaded successfully, if it's already in pageserver's memory (i.e. was loaded already before).
|
||||
///
|
||||
/// Attach happens on startup and sucessful timeline downloads
|
||||
/// (some subset of timeline files, always including its metadata, after which the new one needs to be registered).
|
||||
pub fn attach_local_tenants(
|
||||
conf: &'static PageServerConf,
|
||||
remote_index: &RemoteIndex,
|
||||
tenants_to_attach: HashMap<TenantId, TenantAttachData>,
|
||||
) {
|
||||
let _entered = info_span!("attach_local_tenants").entered();
|
||||
let number_of_tenants = tenants_to_attach.len();
|
||||
|
||||
for (tenant_id, local_timelines) in tenants_to_attach {
|
||||
let mut tenants_accessor = tenants_state::write_tenants();
|
||||
let tenant = match tenants_accessor.entry(tenant_id) {
|
||||
hash_map::Entry::Occupied(o) => {
|
||||
info!("Tenant {tenant_id} was found in pageserver's memory");
|
||||
Arc::clone(o.get())
|
||||
}
|
||||
hash_map::Entry::Vacant(v) => {
|
||||
info!("Tenant {tenant_id} was not found in pageserver's memory, loading it");
|
||||
let tenant = Arc::new(Tenant::new(
|
||||
conf,
|
||||
TenantConfOpt::default(),
|
||||
Arc::new(PostgresRedoManager::new(conf, tenant_id)),
|
||||
tenant_id,
|
||||
remote_index.clone(),
|
||||
conf.remote_storage_config.is_some(),
|
||||
));
|
||||
match local_timelines {
|
||||
TenantAttachData::Broken(_) => {
|
||||
tenant.set_state(TenantState::Broken);
|
||||
}
|
||||
TenantAttachData::Ready(_) => {
|
||||
match Tenant::load_tenant_config(conf, tenant_id) {
|
||||
Ok(tenant_conf) => {
|
||||
tenant.update_tenant_config(tenant_conf);
|
||||
tenant.activate(false);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to read config for tenant {tenant_id}, disabling tenant: {e:?}");
|
||||
tenant.set_state(TenantState::Broken);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
v.insert(Arc::clone(&tenant));
|
||||
tenant
|
||||
}
|
||||
};
|
||||
drop(tenants_accessor);
|
||||
match local_timelines {
|
||||
TenantAttachData::Broken(e) => warn!("{}", e),
|
||||
TenantAttachData::Ready(ref timelines) => {
|
||||
info!("Attaching {} timelines for {tenant_id}", timelines.len());
|
||||
debug!("Timelines to attach: {local_timelines:?}");
|
||||
let has_timelines = !timelines.is_empty();
|
||||
let timelines_to_attach = timelines
|
||||
.iter()
|
||||
.map(|(&k, v)| (k, v.metadata().to_owned()))
|
||||
.collect();
|
||||
match tenant.init_attach_timelines(timelines_to_attach) {
|
||||
Ok(()) => {
|
||||
info!("successfully loaded local timelines for tenant {tenant_id}");
|
||||
tenant.activate(has_timelines);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to attach tenant timelines: {e:?}");
|
||||
tenant.set_state(TenantState::Broken);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
// On error, print it, but continue with the other tenants. If we error out
|
||||
// here, the pageserver startup fails altogether, causing outage for *all*
|
||||
// tenants. That seems worse.
|
||||
error!(
|
||||
"Failed to list tenants dir entry {:?} in directory {}, reason: {:?}",
|
||||
dir_entry,
|
||||
tenants_dir.display(),
|
||||
e,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!("Processed {number_of_tenants} local tenants during attach")
|
||||
info!("Processed {number_of_tenants} local tenants at startup");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn load_local_tenant(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_path: &Path,
|
||||
remote_storage: Option<GenericRemoteStorage>,
|
||||
) -> anyhow::Result<Option<Arc<Tenant>>> {
|
||||
if !tenant_path.is_dir() {
|
||||
anyhow::bail!("tenant_path is not a directory: {tenant_path:?}")
|
||||
}
|
||||
|
||||
let is_empty = tenant_path
|
||||
.is_empty_dir()
|
||||
.context("check whether tenant_path is an empty dir")?;
|
||||
if is_empty {
|
||||
info!("skipping empty tenant directory {tenant_path:?}");
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let tenant_id = tenant_path
|
||||
.file_name()
|
||||
.and_then(OsStr::to_str)
|
||||
.unwrap_or_default()
|
||||
.parse::<TenantId>()
|
||||
.context("Could not parse tenant id out of the tenant dir name")?;
|
||||
|
||||
let tenant = if conf.tenant_attaching_mark_file_path(&tenant_id).exists() {
|
||||
info!("tenant {tenant_id} has attaching mark file, resuming its attach operation");
|
||||
if let Some(remote_storage) = remote_storage {
|
||||
Tenant::spawn_attach(conf, tenant_id, &remote_storage)
|
||||
} else {
|
||||
warn!("tenant {tenant_id} has attaching mark file, but pageserver has no remote storage configured");
|
||||
Tenant::create_broken_tenant(conf, tenant_id)
|
||||
}
|
||||
} else {
|
||||
info!("tenant {tenant_id} is assumed to be loadable, starting load operation");
|
||||
// Start loading the tenant into memory. It will initially be in Loading state.
|
||||
Tenant::spawn_load(conf, tenant_id, remote_storage)
|
||||
};
|
||||
Ok(Some(tenant))
|
||||
}
|
||||
|
||||
///
|
||||
@@ -218,7 +170,7 @@ pub async fn shutdown_all_tenants() {
|
||||
for (_, tenant) in m.drain() {
|
||||
if tenant.is_active() {
|
||||
// updates tenant state, forbidding new GC and compaction iterations from starting
|
||||
tenant.set_state(TenantState::Paused);
|
||||
tenant.set_stopping();
|
||||
tenants_to_shut_down.push(tenant)
|
||||
}
|
||||
}
|
||||
@@ -247,156 +199,41 @@ pub async fn shutdown_all_tenants() {
|
||||
}
|
||||
}
|
||||
|
||||
fn create_tenant_files(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_conf: TenantConfOpt,
|
||||
tenant_id: TenantId,
|
||||
) -> anyhow::Result<()> {
|
||||
let target_tenant_directory = conf.tenant_path(&tenant_id);
|
||||
anyhow::ensure!(
|
||||
!target_tenant_directory.exists(),
|
||||
"cannot create new tenant repo: '{tenant_id}' directory already exists",
|
||||
);
|
||||
|
||||
let temporary_tenant_dir =
|
||||
path_with_suffix_extension(&target_tenant_directory, TEMP_FILE_SUFFIX);
|
||||
debug!(
|
||||
"Creating temporary directory structure in {}",
|
||||
temporary_tenant_dir.display()
|
||||
);
|
||||
|
||||
// top-level dir may exist if we are creating it through CLI
|
||||
crashsafe::create_dir_all(&temporary_tenant_dir).with_context(|| {
|
||||
format!(
|
||||
"could not create temporary tenant directory {}",
|
||||
temporary_tenant_dir.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
let creation_result = try_create_target_tenant_dir(
|
||||
conf,
|
||||
tenant_conf,
|
||||
tenant_id,
|
||||
&temporary_tenant_dir,
|
||||
&target_tenant_directory,
|
||||
);
|
||||
|
||||
if creation_result.is_err() {
|
||||
error!("Failed to create directory structure for tenant {tenant_id}, cleaning tmp data");
|
||||
if let Err(e) = fs::remove_dir_all(&temporary_tenant_dir) {
|
||||
error!("Failed to remove temporary tenant directory {temporary_tenant_dir:?}: {e}")
|
||||
} else if let Err(e) = crashsafe::fsync(&temporary_tenant_dir) {
|
||||
error!(
|
||||
"Failed to fsync removed temporary tenant directory {temporary_tenant_dir:?}: {e}"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
creation_result
|
||||
}
|
||||
|
||||
fn try_create_target_tenant_dir(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_conf: TenantConfOpt,
|
||||
tenant_id: TenantId,
|
||||
temporary_tenant_dir: &Path,
|
||||
target_tenant_directory: &Path,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let temporary_tenant_timelines_dir = rebase_directory(
|
||||
&conf.timelines_path(&tenant_id),
|
||||
target_tenant_directory,
|
||||
temporary_tenant_dir,
|
||||
)
|
||||
.with_context(|| format!("Failed to resolve tenant {tenant_id} temporary timelines dir"))?;
|
||||
let temporary_tenant_config_path = rebase_directory(
|
||||
&conf.tenant_config_path(tenant_id),
|
||||
target_tenant_directory,
|
||||
temporary_tenant_dir,
|
||||
)
|
||||
.with_context(|| format!("Failed to resolve tenant {tenant_id} temporary config path"))?;
|
||||
|
||||
Tenant::persist_tenant_config(&temporary_tenant_config_path, tenant_conf, true).with_context(
|
||||
|| {
|
||||
format!(
|
||||
"Failed to write tenant {} config to {}",
|
||||
tenant_id,
|
||||
temporary_tenant_config_path.display()
|
||||
)
|
||||
},
|
||||
)?;
|
||||
crashsafe::create_dir(&temporary_tenant_timelines_dir).with_context(|| {
|
||||
format!(
|
||||
"could not create tenant {} temporary timelines directory {}",
|
||||
tenant_id,
|
||||
temporary_tenant_timelines_dir.display()
|
||||
)
|
||||
})?;
|
||||
fail::fail_point!("tenant-creation-before-tmp-rename", |_| {
|
||||
anyhow::bail!("failpoint tenant-creation-before-tmp-rename");
|
||||
});
|
||||
|
||||
fs::rename(&temporary_tenant_dir, target_tenant_directory).with_context(|| {
|
||||
format!(
|
||||
"failed to move tenant {} temporary directory {} into the permanent one {}",
|
||||
tenant_id,
|
||||
temporary_tenant_dir.display(),
|
||||
target_tenant_directory.display()
|
||||
)
|
||||
})?;
|
||||
let target_dir_parent = target_tenant_directory.parent().with_context(|| {
|
||||
format!(
|
||||
"Failed to get tenant {} dir parent for {}",
|
||||
tenant_id,
|
||||
target_tenant_directory.display()
|
||||
)
|
||||
})?;
|
||||
crashsafe::fsync(target_dir_parent).with_context(|| {
|
||||
format!(
|
||||
"Failed to fsync renamed directory's parent {} for tenant {}",
|
||||
target_dir_parent.display(),
|
||||
tenant_id,
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn rebase_directory(original_path: &Path, base: &Path, new_base: &Path) -> anyhow::Result<PathBuf> {
|
||||
let relative_path = original_path.strip_prefix(base).with_context(|| {
|
||||
format!(
|
||||
"Failed to strip base prefix '{}' off path '{}'",
|
||||
base.display(),
|
||||
original_path.display()
|
||||
)
|
||||
})?;
|
||||
Ok(new_base.join(relative_path))
|
||||
}
|
||||
|
||||
pub fn create_tenant(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_conf: TenantConfOpt,
|
||||
tenant_id: TenantId,
|
||||
remote_index: RemoteIndex,
|
||||
) -> anyhow::Result<Option<TenantId>> {
|
||||
remote_storage: Option<GenericRemoteStorage>,
|
||||
) -> anyhow::Result<Option<Arc<Tenant>>> {
|
||||
match tenants_state::write_tenants().entry(tenant_id) {
|
||||
hash_map::Entry::Occupied(_) => {
|
||||
debug!("tenant {tenant_id} already exists");
|
||||
Ok(None)
|
||||
}
|
||||
hash_map::Entry::Vacant(v) => {
|
||||
let wal_redo_manager = Arc::new(PostgresRedoManager::new(conf, tenant_id));
|
||||
create_tenant_files(conf, tenant_conf, tenant_id)?;
|
||||
let tenant = Arc::new(Tenant::new(
|
||||
conf,
|
||||
tenant_conf,
|
||||
wal_redo_manager,
|
||||
tenant_id,
|
||||
remote_index,
|
||||
conf.remote_storage_config.is_some(),
|
||||
));
|
||||
tenant.activate(false);
|
||||
v.insert(tenant);
|
||||
Ok(Some(tenant_id))
|
||||
// Hold the write_tenants() lock, since all of this is local IO.
|
||||
// If this section ever becomes contentious, introduce a new `TenantState::Creating`.
|
||||
let tenant_directory =
|
||||
super::tenant::create_tenant_files(conf, tenant_conf, tenant_id)?;
|
||||
let created_tenant = load_local_tenant(conf, &tenant_directory, remote_storage)?;
|
||||
match created_tenant {
|
||||
None => {
|
||||
// We get None in case the directory is empty.
|
||||
// This shouldn't happen here, because we just created the directory.
|
||||
// So, skip any cleanup work for now, we don't know how we reached this state.
|
||||
anyhow::bail!("we just created the tenant directory, it can't be empty");
|
||||
}
|
||||
Some(tenant) => {
|
||||
anyhow::ensure!(
|
||||
tenant_id == tenant.tenant_id(),
|
||||
"loaded created tenant has unexpected tenant id (expect {} != actual {})",
|
||||
tenant_id,
|
||||
tenant.tenant_id()
|
||||
);
|
||||
v.insert(Arc::clone(&tenant));
|
||||
Ok(Some(tenant))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -420,7 +257,10 @@ pub fn get_tenant(tenant_id: TenantId, active_only: bool) -> anyhow::Result<Arc<
|
||||
.get(&tenant_id)
|
||||
.with_context(|| format!("Tenant {tenant_id} not found in the local state"))?;
|
||||
if active_only && !tenant.is_active() {
|
||||
anyhow::bail!("Tenant {tenant_id} is not active")
|
||||
anyhow::bail!(
|
||||
"Tenant {tenant_id} is not active. Current state: {:?}",
|
||||
tenant.current_state()
|
||||
)
|
||||
} else {
|
||||
Ok(Arc::clone(tenant))
|
||||
}
|
||||
@@ -450,10 +290,7 @@ pub async fn delete_timeline(tenant_id: TenantId, timeline_id: TimelineId) -> an
|
||||
info!("timeline task shutdown completed");
|
||||
match get_tenant(tenant_id, true) {
|
||||
Ok(tenant) => {
|
||||
tenant.delete_timeline(timeline_id)?;
|
||||
if tenant.list_timelines().is_empty() {
|
||||
tenant.activate(false);
|
||||
}
|
||||
tenant.delete_timeline(timeline_id).await?;
|
||||
}
|
||||
Err(e) => anyhow::bail!("Cannot access tenant {tenant_id} in local tenant state: {e:?}"),
|
||||
}
|
||||
@@ -473,7 +310,7 @@ pub async fn detach_tenant(
|
||||
None => anyhow::bail!("Tenant not found for id {tenant_id}"),
|
||||
};
|
||||
|
||||
tenant.set_state(TenantState::Paused);
|
||||
tenant.set_stopping();
|
||||
// shutdown all tenant and timeline tasks: gc, compaction, page service)
|
||||
task_mgr::shutdown_tasks(None, Some(tenant_id), None).await;
|
||||
|
||||
@@ -497,374 +334,94 @@ pub async fn detach_tenant(
|
||||
///
|
||||
/// Get list of tenants, for the mgmt API
|
||||
///
|
||||
pub fn list_tenant_info(remote_index: &RemoteTimelineIndex) -> Vec<TenantInfo> {
|
||||
pub fn list_tenants() -> Vec<(TenantId, TenantState)> {
|
||||
tenants_state::read_tenants()
|
||||
.iter()
|
||||
.map(|(id, tenant)| {
|
||||
let has_in_progress_downloads = remote_index
|
||||
.tenant_entry(id)
|
||||
.map(|entry| entry.has_in_progress_downloads());
|
||||
|
||||
// TODO this is not correct when we might have remote storage sync disabled:
|
||||
// we keep `RemoteTimelineIndex` in memory anyway for simplicity and this error message is printed still
|
||||
if has_in_progress_downloads.is_none() {
|
||||
error!("timeline is not found in remote index while it is present in the tenants registry")
|
||||
}
|
||||
|
||||
TenantInfo {
|
||||
id: *id,
|
||||
state: tenant.current_state(),
|
||||
current_physical_size: None,
|
||||
has_in_progress_downloads,
|
||||
}
|
||||
})
|
||||
.map(|(id, tenant)| (*id, tenant.current_state()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum TenantAttachData {
|
||||
Ready(HashMap<TimelineId, TimelineLocalFiles>),
|
||||
Broken(anyhow::Error),
|
||||
}
|
||||
/// Attempts to collect information about all tenant and timelines, existing on the local FS.
|
||||
/// If finds any, deletes all temporary files and directories, created before. Also removes empty directories,
|
||||
/// that may appear due to such removals.
|
||||
/// Does not fail on particular timeline or tenant collection errors, rather logging them and ignoring the entities.
|
||||
fn local_tenant_timeline_files(
|
||||
config: &'static PageServerConf,
|
||||
) -> anyhow::Result<HashMap<TenantId, TenantAttachData>> {
|
||||
let _entered = info_span!("local_tenant_timeline_files").entered();
|
||||
|
||||
let mut local_tenant_timeline_files = HashMap::new();
|
||||
let tenants_dir = config.tenants_path();
|
||||
for tenants_dir_entry in fs::read_dir(&tenants_dir)
|
||||
.with_context(|| format!("Failed to list tenants dir {}", tenants_dir.display()))?
|
||||
{
|
||||
match &tenants_dir_entry {
|
||||
Ok(tenants_dir_entry) => {
|
||||
let tenant_dir_path = tenants_dir_entry.path();
|
||||
if is_temporary(&tenant_dir_path) {
|
||||
info!(
|
||||
"Found temporary tenant directory, removing: {}",
|
||||
tenant_dir_path.display()
|
||||
);
|
||||
if let Err(e) = fs::remove_dir_all(&tenant_dir_path) {
|
||||
error!(
|
||||
"Failed to remove temporary directory '{}': {:?}",
|
||||
tenant_dir_path.display(),
|
||||
e
|
||||
);
|
||||
}
|
||||
} else {
|
||||
match collect_timelines_for_tenant(config, &tenant_dir_path) {
|
||||
Ok((tenant_id, TenantAttachData::Broken(e))) => {
|
||||
local_tenant_timeline_files.entry(tenant_id).or_insert(TenantAttachData::Broken(e));
|
||||
},
|
||||
Ok((tenant_id, TenantAttachData::Ready(collected_files))) => {
|
||||
if collected_files.is_empty() {
|
||||
match remove_if_empty(&tenant_dir_path) {
|
||||
Ok(true) => info!("Removed empty tenant directory {}", tenant_dir_path.display()),
|
||||
Ok(false) => {
|
||||
// insert empty timeline entry: it has some non-temporary files inside that we cannot remove
|
||||
// so make obvious for HTTP API callers, that something exists there and try to load the tenant
|
||||
let _ = local_tenant_timeline_files.entry(tenant_id).or_insert_with(|| TenantAttachData::Ready(HashMap::new()));
|
||||
},
|
||||
Err(e) => error!("Failed to remove empty tenant directory: {e:?}"),
|
||||
}
|
||||
} else {
|
||||
match local_tenant_timeline_files.entry(tenant_id) {
|
||||
hash_map::Entry::Vacant(entry) => {
|
||||
entry.insert(TenantAttachData::Ready(collected_files));
|
||||
}
|
||||
hash_map::Entry::Occupied(entry) =>{
|
||||
if let TenantAttachData::Ready(old_timelines) = entry.into_mut() {
|
||||
old_timelines.extend(collected_files);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
Err(e) => error!(
|
||||
"Failed to collect tenant files from dir '{}' for entry {:?}, reason: {:#}",
|
||||
tenants_dir.display(),
|
||||
tenants_dir_entry,
|
||||
e
|
||||
),
|
||||
}
|
||||
/// Execute Attach mgmt API command.
|
||||
///
|
||||
/// Downloading all the tenant data is performed in the background, this merely
|
||||
/// spawns the background task and returns quickly.
|
||||
pub async fn attach_tenant(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_id: TenantId,
|
||||
remote_storage: &GenericRemoteStorage,
|
||||
) -> anyhow::Result<()> {
|
||||
match tenants_state::write_tenants().entry(tenant_id) {
|
||||
hash_map::Entry::Occupied(e) => {
|
||||
// Cannot attach a tenant that already exists. The error message depends on
|
||||
// the state it's in.
|
||||
match e.get().current_state() {
|
||||
TenantState::Attaching => {
|
||||
anyhow::bail!("tenant {tenant_id} attach is already in progress")
|
||||
}
|
||||
current_state => {
|
||||
anyhow::bail!("tenant already exists, current state: {current_state:?}")
|
||||
}
|
||||
}
|
||||
Err(e) => error!(
|
||||
"Failed to list tenants dir entry {:?} in directory {}, reason: {:?}",
|
||||
tenants_dir_entry,
|
||||
tenants_dir.display(),
|
||||
e
|
||||
),
|
||||
}
|
||||
hash_map::Entry::Vacant(v) => {
|
||||
let tenant = Tenant::spawn_attach(conf, tenant_id, remote_storage);
|
||||
v.insert(tenant);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"Collected files for {} tenants",
|
||||
local_tenant_timeline_files.len(),
|
||||
);
|
||||
Ok(local_tenant_timeline_files)
|
||||
}
|
||||
|
||||
fn remove_if_empty(tenant_dir_path: &Path) -> anyhow::Result<bool> {
|
||||
let directory_is_empty = tenant_dir_path
|
||||
.read_dir()
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to read directory '{}' contents",
|
||||
tenant_dir_path.display()
|
||||
)
|
||||
})?
|
||||
.next()
|
||||
.is_none();
|
||||
#[cfg(feature = "testing")]
|
||||
use {
|
||||
crate::repository::GcResult, pageserver_api::models::TimelineGcRequest,
|
||||
utils::http::error::ApiError,
|
||||
};
|
||||
|
||||
if directory_is_empty {
|
||||
fs::remove_dir_all(&tenant_dir_path).with_context(|| {
|
||||
format!(
|
||||
"Failed to remove empty directory '{}'",
|
||||
tenant_dir_path.display(),
|
||||
)
|
||||
})?;
|
||||
#[cfg(feature = "testing")]
|
||||
pub fn immediate_gc(
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
gc_req: TimelineGcRequest,
|
||||
) -> Result<tokio::sync::oneshot::Receiver<Result<GcResult, anyhow::Error>>, ApiError> {
|
||||
let guard = tenants_state::read_tenants();
|
||||
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
let tenant = guard
|
||||
.get(&tenant_id)
|
||||
.map(Arc::clone)
|
||||
.with_context(|| format!("Tenant {tenant_id} not found"))
|
||||
.map_err(ApiError::NotFound)?;
|
||||
|
||||
fn is_temporary(path: &Path) -> bool {
|
||||
match path.file_name() {
|
||||
Some(name) => name.to_string_lossy().ends_with(TEMP_FILE_SUFFIX),
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
let gc_horizon = gc_req.gc_horizon.unwrap_or_else(|| tenant.get_gc_horizon());
|
||||
// Use tenant's pitr setting
|
||||
let pitr = tenant.get_pitr_interval();
|
||||
|
||||
fn is_uninit_mark(path: &Path) -> bool {
|
||||
match path.file_name() {
|
||||
Some(name) => name
|
||||
.to_string_lossy()
|
||||
.ends_with(TIMELINE_UNINIT_MARK_SUFFIX),
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
|
||||
fn collect_timelines_for_tenant(
|
||||
config: &'static PageServerConf,
|
||||
tenant_path: &Path,
|
||||
) -> anyhow::Result<(TenantId, TenantAttachData)> {
|
||||
let tenant_id = tenant_path
|
||||
.file_name()
|
||||
.and_then(OsStr::to_str)
|
||||
.unwrap_or_default()
|
||||
.parse::<TenantId>()
|
||||
.context("Could not parse tenant id out of the tenant dir name")?;
|
||||
let timelines_dir = config.timelines_path(&tenant_id);
|
||||
|
||||
if !timelines_dir.as_path().is_dir() {
|
||||
return Ok((
|
||||
tenant_id,
|
||||
TenantAttachData::Broken(anyhow::anyhow!(
|
||||
"Tenant {} has no timelines directory at {}",
|
||||
tenant_id,
|
||||
timelines_dir.display()
|
||||
)),
|
||||
));
|
||||
}
|
||||
|
||||
let mut tenant_timelines = HashMap::new();
|
||||
for timelines_dir_entry in fs::read_dir(&timelines_dir)
|
||||
.with_context(|| format!("Failed to list timelines dir entry for tenant {tenant_id}"))?
|
||||
{
|
||||
match timelines_dir_entry {
|
||||
Ok(timelines_dir_entry) => {
|
||||
let timeline_dir = timelines_dir_entry.path();
|
||||
if is_temporary(&timeline_dir) {
|
||||
info!(
|
||||
"Found temporary timeline directory, removing: {}",
|
||||
timeline_dir.display()
|
||||
);
|
||||
if let Err(e) = fs::remove_dir_all(&timeline_dir) {
|
||||
error!(
|
||||
"Failed to remove temporary directory '{}': {:?}",
|
||||
timeline_dir.display(),
|
||||
e
|
||||
);
|
||||
}
|
||||
} else if is_uninit_mark(&timeline_dir) {
|
||||
let timeline_uninit_mark_file = &timeline_dir;
|
||||
info!(
|
||||
"Found an uninit mark file {}, removing the timeline and its uninit mark",
|
||||
timeline_uninit_mark_file.display()
|
||||
);
|
||||
let timeline_id = timeline_uninit_mark_file
|
||||
.file_stem()
|
||||
.and_then(OsStr::to_str)
|
||||
.unwrap_or_default()
|
||||
.parse::<TimelineId>()
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Could not parse timeline id out of the timeline uninit mark name {}",
|
||||
timeline_uninit_mark_file.display()
|
||||
)
|
||||
})?;
|
||||
let timeline_dir = config.timeline_path(&timeline_id, &tenant_id);
|
||||
if let Err(e) =
|
||||
remove_timeline_and_uninit_mark(&timeline_dir, timeline_uninit_mark_file)
|
||||
{
|
||||
error!("Failed to clean up uninit marked timeline: {e:?}");
|
||||
}
|
||||
} else {
|
||||
let timeline_id = timeline_dir
|
||||
.file_name()
|
||||
.and_then(OsStr::to_str)
|
||||
.unwrap_or_default()
|
||||
.parse::<TimelineId>()
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Could not parse timeline id out of the timeline dir name {}",
|
||||
timeline_dir.display()
|
||||
)
|
||||
})?;
|
||||
let timeline_uninit_mark_file =
|
||||
config.timeline_uninit_mark_file_path(tenant_id, timeline_id);
|
||||
if timeline_uninit_mark_file.exists() {
|
||||
info!("Found an uninit mark file for timeline {tenant_id}/{timeline_id}, removing the timeline and its uninit mark");
|
||||
if let Err(e) = remove_timeline_and_uninit_mark(
|
||||
&timeline_dir,
|
||||
&timeline_uninit_mark_file,
|
||||
) {
|
||||
error!("Failed to clean up uninit marked timeline: {e:?}");
|
||||
}
|
||||
} else {
|
||||
match collect_timeline_files(&timeline_dir) {
|
||||
Ok((metadata, timeline_files)) => {
|
||||
tenant_timelines.insert(
|
||||
timeline_id,
|
||||
TimelineLocalFiles::collected(metadata, timeline_files),
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to process timeline dir contents at '{}', reason: {:?}",
|
||||
timeline_dir.display(),
|
||||
e
|
||||
);
|
||||
match remove_if_empty(&timeline_dir) {
|
||||
Ok(true) => info!(
|
||||
"Removed empty timeline directory {}",
|
||||
timeline_dir.display()
|
||||
),
|
||||
Ok(false) => (),
|
||||
Err(e) => {
|
||||
error!("Failed to remove empty timeline directory: {e:?}")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to list timelines for entry tenant {tenant_id}, reason: {e:?}")
|
||||
// Run in task_mgr to avoid race with detach operation
|
||||
let (task_done, wait_task_done) = tokio::sync::oneshot::channel();
|
||||
task_mgr::spawn(
|
||||
&tokio::runtime::Handle::current(),
|
||||
TaskKind::GarbageCollector,
|
||||
Some(tenant_id),
|
||||
Some(timeline_id),
|
||||
&format!("timeline_gc_handler garbage collection run for tenant {tenant_id} timeline {timeline_id}"),
|
||||
false,
|
||||
async move {
|
||||
fail::fail_point!("immediate_gc_task_pre");
|
||||
let result = tenant
|
||||
.gc_iteration(Some(timeline_id), gc_horizon, pitr, true)
|
||||
.instrument(info_span!("manual_gc", tenant = %tenant_id, timeline = %timeline_id))
|
||||
.await;
|
||||
// FIXME: `gc_iteration` can return an error for multiple reasons; we should handle it
|
||||
// better once the types support it.
|
||||
match task_done.send(result) {
|
||||
Ok(_) => (),
|
||||
Err(result) => error!("failed to send gc result: {result:?}"),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
if tenant_timelines.is_empty() {
|
||||
// this is normal, we've removed all broken, empty and temporary timeline dirs
|
||||
// but should allow the tenant to stay functional and allow creating new timelines
|
||||
// on a restart, we require tenants to have the timelines dir, so leave it on disk
|
||||
debug!("Tenant {tenant_id} has no timelines loaded");
|
||||
}
|
||||
|
||||
Ok((tenant_id, TenantAttachData::Ready(tenant_timelines)))
|
||||
}
|
||||
|
||||
fn remove_timeline_and_uninit_mark(timeline_dir: &Path, uninit_mark: &Path) -> anyhow::Result<()> {
|
||||
fs::remove_dir_all(&timeline_dir)
|
||||
.or_else(|e| {
|
||||
if e.kind() == std::io::ErrorKind::NotFound {
|
||||
// we can leave the uninit mark without a timeline dir,
|
||||
// just remove the mark then
|
||||
Ok(())
|
||||
} else {
|
||||
Err(e)
|
||||
}
|
||||
})
|
||||
.with_context(|| {
format!(
"Failed to remove uninit marked timeline directory {}",
timeline_dir.display()
)
})?;
|
||||
fs::remove_file(&uninit_mark).with_context(|| {
|
||||
format!(
|
||||
"Failed to remove timeline uninit mark file {}",
|
||||
uninit_mark.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// discover timeline files and extract timeline metadata
|
||||
// NOTE: ephemeral files are excluded from the list
|
||||
fn collect_timeline_files(
|
||||
timeline_dir: &Path,
|
||||
) -> anyhow::Result<(TimelineMetadata, HashMap<PathBuf, LayerFileMetadata>)> {
|
||||
let mut timeline_files = HashMap::new();
|
||||
let mut timeline_metadata_path = None;
|
||||
|
||||
let timeline_dir_entries =
|
||||
fs::read_dir(&timeline_dir).context("Failed to list timeline dir contents")?;
|
||||
for entry in timeline_dir_entries {
|
||||
let entry_path = entry.context("Failed to list timeline dir entry")?.path();
|
||||
let metadata = entry_path.metadata()?;
|
||||
|
||||
if metadata.is_file() {
|
||||
if entry_path.file_name().and_then(OsStr::to_str) == Some(METADATA_FILE_NAME) {
|
||||
timeline_metadata_path = Some(entry_path);
|
||||
} else if is_ephemeral_file(&entry_path.file_name().unwrap().to_string_lossy()) {
|
||||
debug!("skipping ephemeral file {}", entry_path.display());
|
||||
continue;
|
||||
} else if is_temporary(&entry_path) {
|
||||
info!("removing temp timeline file at {}", entry_path.display());
|
||||
fs::remove_file(&entry_path).with_context(|| {
|
||||
format!(
|
||||
"failed to remove temp download file at {}",
|
||||
entry_path.display()
|
||||
)
|
||||
})?;
|
||||
} else {
|
||||
let layer_metadata = LayerFileMetadata::new(metadata.len());
|
||||
timeline_files.insert(entry_path, layer_metadata);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME (rodionov) if attach call succeeded, and then pageserver is restarted before download is completed
|
||||
// then attach is lost. There would be no retries for that,
|
||||
// initial collect will fail because there is no metadata.
|
||||
// We either need to start download if we see empty dir after restart or attach caller should
|
||||
// be aware of that and retry attach if awaits_download for timeline switched from true to false
|
||||
// but timelinne didn't appear locally.
|
||||
// Check what happens with remote index in that case.
|
||||
let timeline_metadata_path = match timeline_metadata_path {
|
||||
Some(path) => path,
|
||||
None => anyhow::bail!("No metadata file found in the timeline directory"),
|
||||
};
|
||||
let metadata = TimelineMetadata::from_bytes(
|
||||
&fs::read(&timeline_metadata_path).context("Failed to read timeline metadata file")?,
|
||||
)
|
||||
.context("Failed to parse timeline metadata file bytes")?;
|
||||
|
||||
anyhow::ensure!(
|
||||
metadata.ancestor_timeline().is_some() || !timeline_files.is_empty(),
|
||||
"Timeline has no ancestor and no layer files"
|
||||
);
|
||||
|
||||
Ok((metadata, timeline_files))
|
||||
// drop the guard until after we've spawned the task so that timeline shutdown will wait for the task
|
||||
drop(guard);
|
||||
|
||||
Ok(wait_task_done)
|
||||
}
|
||||
|
||||
@@ -6,7 +6,8 @@ use std::sync::Arc;
use std::time::Duration;

use crate::metrics::TENANT_TASK_EVENTS;
use crate::task_mgr::{self, TaskKind, BACKGROUND_RUNTIME};
use crate::task_mgr;
use crate::task_mgr::{TaskKind, BACKGROUND_RUNTIME};
use crate::tenant::{Tenant, TenantState};
use crate::tenant_mgr;
use tracing::*;
@@ -65,13 +66,17 @@ async fn compaction_loop(tenant_id: TenantId) {
},
};

// Run blocking part of the task

// Run compaction
let mut sleep_duration = tenant.get_compaction_period();
if let Err(e) = tenant.compaction_iteration() {
sleep_duration = wait_duration;
error!("Compaction failed, retrying in {:?}: {e:?}", sleep_duration);
if sleep_duration == Duration::ZERO {
info!("automatic compaction is disabled");
// check again in 10 seconds, in case it's been enabled again.
sleep_duration = Duration::from_secs(10);
} else {
// Run compaction
if let Err(e) = tenant.compaction_iteration().await {
sleep_duration = wait_duration;
error!("Compaction failed, retrying in {:?}: {e:?}", sleep_duration);
}
}

// Sleep
|
||||
@@ -112,15 +117,21 @@ async fn gc_loop(tenant_id: TenantId) {
|
||||
},
|
||||
};
|
||||
|
||||
// Run gc
|
||||
let gc_period = tenant.get_gc_period();
|
||||
let gc_horizon = tenant.get_gc_horizon();
|
||||
let mut sleep_duration = gc_period;
|
||||
if gc_horizon > 0 {
|
||||
if let Err(e) = tenant.gc_iteration(None, gc_horizon, tenant.get_pitr_interval(), false).await
|
||||
{
|
||||
sleep_duration = wait_duration;
|
||||
error!("Gc failed, retrying in {:?}: {e:?}", sleep_duration);
|
||||
if sleep_duration == Duration::ZERO {
|
||||
info!("automatic GC is disabled");
|
||||
// check again in 10 seconds, in case it's been enabled again.
|
||||
sleep_duration = Duration::from_secs(10);
|
||||
} else {
|
||||
// Run gc
|
||||
if gc_horizon > 0 {
|
||||
if let Err(e) = tenant.gc_iteration(None, gc_horizon, tenant.get_pitr_interval(), false).await
|
||||
{
|
||||
sleep_duration = wait_duration;
|
||||
error!("Gc failed, retrying in {:?}: {e:?}", sleep_duration);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -154,7 +165,7 @@ async fn wait_for_active_tenant(
|
||||
};
|
||||
|
||||
// if the tenant has a proper status already, no need to wait for anything
|
||||
if tenant.should_run_tasks() {
|
||||
if tenant.current_state() == TenantState::Active {
|
||||
ControlFlow::Continue(tenant)
|
||||
} else {
|
||||
let mut tenant_state_updates = tenant.subscribe_for_state_updates();
|
||||
@@ -163,14 +174,12 @@ async fn wait_for_active_tenant(
|
||||
Ok(()) => {
|
||||
let new_state = *tenant_state_updates.borrow();
|
||||
match new_state {
|
||||
TenantState::Active {
|
||||
background_jobs_running: true,
|
||||
} => {
|
||||
debug!("Tenant state changed to active with background jobs enabled, continuing the task loop");
|
||||
TenantState::Active => {
|
||||
debug!("Tenant state changed to active, continuing the task loop");
|
||||
return ControlFlow::Continue(tenant);
|
||||
}
|
||||
state => {
|
||||
debug!("Not running the task loop, tenant is not active with background jobs enabled: {state:?}");
|
||||
debug!("Not running the task loop, tenant is not active: {state:?}");
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
use anyhow::Context;
|
||||
use postgres_ffi::v14::nonrelfile_utils::clogpage_precedes;
|
||||
use postgres_ffi::v14::nonrelfile_utils::slru_may_delete_clogsegment;
|
||||
use postgres_ffi::{page_is_new, page_set_lsn};
|
||||
use postgres_ffi::{fsm_logical_to_physical, page_is_new, page_set_lsn};
|
||||
|
||||
use anyhow::Result;
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
@@ -612,20 +612,19 @@ impl<'a> WalIngest<'a> {
forknum: FSM_FORKNUM,
};

// FIXME: 'blkno' stored in the WAL record is the new size of the
// heap. The formula for calculating the new size of the FSM is
// pretty complicated (see FreeSpaceMapPrepareTruncateRel() in
// PostgreSQL), and we should also clear bits in the tail FSM block,
// and update the upper level FSM pages. None of that has been
// implemented. What we do instead, is always just truncate the FSM
// to zero blocks. That's bad for performance, but safe. (The FSM
// isn't needed for correctness, so we could also leave garbage in
// it. Seems more tidy to zap it away.)
if rec.blkno != 0 {
info!("Partial truncation of FSM is not supported");
let fsm_logical_page_no = rec.blkno / pg_constants::SLOTS_PER_FSM_PAGE;
let mut fsm_physical_page_no = fsm_logical_to_physical(fsm_logical_page_no);
if rec.blkno % pg_constants::SLOTS_PER_FSM_PAGE != 0 {
// Tail of last remaining FSM page has to be zeroed.
// We are not precise here and instead of digging in FSM bitmap format just clear the whole page.
modification.put_rel_page_image(rel, fsm_physical_page_no, ZERO_PAGE.clone())?;
fsm_physical_page_no += 1;
}
let nblocks = self.get_relsize(rel, modification.lsn)?;
if nblocks > fsm_physical_page_no {
// check if something to do: FSM is larger than truncate position
self.put_rel_truncation(modification, rel, fsm_physical_page_no)?;
}
let num_fsm_blocks = 0;
self.put_rel_truncation(modification, rel, num_fsm_blocks)?;
}
if (rec.flags & pg_constants::SMGR_TRUNCATE_VM) != 0 {
let rel = RelTag {
@@ -635,16 +634,18 @@ impl<'a> WalIngest<'a> {
forknum: VISIBILITYMAP_FORKNUM,
};

// FIXME: Like with the FSM above, the logic to truncate the VM
// correctly has not been implemented. Just zap it away completely,
// always. Unlike the FSM, the VM must never have bits incorrectly
// set. From a correctness point of view, it's always OK to clear
// bits or remove it altogether, though.
if rec.blkno != 0 {
info!("Partial truncation of VM is not supported");
let mut vm_page_no = rec.blkno / pg_constants::VM_HEAPBLOCKS_PER_PAGE;
if rec.blkno % pg_constants::VM_HEAPBLOCKS_PER_PAGE != 0 {
// Tail of last remaining vm page has to be zeroed.
// We are not precise here and instead of digging in VM bitmap format just clear the whole page.
modification.put_rel_page_image(rel, vm_page_no, ZERO_PAGE.clone())?;
vm_page_no += 1;
}
let nblocks = self.get_relsize(rel, modification.lsn)?;
if nblocks > vm_page_no {
// check if something to do: VM is larger than truncate position
self.put_rel_truncation(modification, rel, vm_page_no)?;
}
let num_vm_blocks = 0;
self.put_rel_truncation(modification, rel, num_vm_blocks)?;
}
Ok(())
}
|
||||
@@ -1090,9 +1091,9 @@ mod tests {
|
||||
Ok(walingest)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_relsize() -> Result<()> {
|
||||
let tenant = TenantHarness::create("test_relsize")?.load();
|
||||
#[tokio::test]
|
||||
async fn test_relsize() -> Result<()> {
|
||||
let tenant = TenantHarness::create("test_relsize")?.load().await;
|
||||
let tline = create_test_timeline(&tenant, TIMELINE_ID, DEFAULT_PG_VERSION)?;
|
||||
let mut walingest = init_walingest_test(&*tline)?;
|
||||
|
||||
@@ -1218,9 +1219,9 @@ mod tests {
|
||||
|
||||
// Test what happens if we dropped a relation
|
||||
// and then created it again within the same layer.
|
||||
#[test]
|
||||
fn test_drop_extend() -> Result<()> {
|
||||
let tenant = TenantHarness::create("test_drop_extend")?.load();
|
||||
#[tokio::test]
|
||||
async fn test_drop_extend() -> Result<()> {
|
||||
let tenant = TenantHarness::create("test_drop_extend")?.load().await;
|
||||
let tline = create_test_timeline(&tenant, TIMELINE_ID, DEFAULT_PG_VERSION)?;
|
||||
let mut walingest = init_walingest_test(&*tline)?;
|
||||
|
||||
@@ -1258,9 +1259,9 @@ mod tests {
|
||||
// Test what happens if we truncated a relation
|
||||
// so that one of its segments was dropped
|
||||
// and then extended it again within the same layer.
|
||||
#[test]
|
||||
fn test_truncate_extend() -> Result<()> {
|
||||
let tenant = TenantHarness::create("test_truncate_extend")?.load();
|
||||
#[tokio::test]
|
||||
async fn test_truncate_extend() -> Result<()> {
|
||||
let tenant = TenantHarness::create("test_truncate_extend")?.load().await;
|
||||
let tline = create_test_timeline(&tenant, TIMELINE_ID, DEFAULT_PG_VERSION)?;
|
||||
let mut walingest = init_walingest_test(&*tline)?;
|
||||
|
||||
@@ -1346,9 +1347,9 @@ mod tests {
|
||||
|
||||
/// Test get_relsize() and truncation with a file larger than 1 GB, so that it's
|
||||
/// split into multiple 1 GB segments in Postgres.
|
||||
#[test]
|
||||
fn test_large_rel() -> Result<()> {
|
||||
let tenant = TenantHarness::create("test_large_rel")?.load();
|
||||
#[tokio::test]
|
||||
async fn test_large_rel() -> Result<()> {
|
||||
let tenant = TenantHarness::create("test_large_rel")?.load().await;
|
||||
let tline = create_test_timeline(&tenant, TIMELINE_ID, DEFAULT_PG_VERSION)?;
|
||||
let mut walingest = init_walingest_test(&*tline)?;
|
||||
|
||||
|
||||
@@ -35,6 +35,7 @@ use crate::{
|
||||
exponential_backoff, walreceiver::get_etcd_client, DEFAULT_BASE_BACKOFF_SECONDS,
|
||||
DEFAULT_MAX_BACKOFF_SECONDS,
|
||||
};
|
||||
use postgres_connection::{parse_host_port, PgConnectionConfig};
|
||||
use utils::{
|
||||
id::{NodeId, TenantTimelineId},
|
||||
lsn::Lsn,
|
||||
@@ -49,6 +50,7 @@ pub fn spawn_connection_manager_task(
|
||||
wal_connect_timeout: Duration,
|
||||
lagging_wal_timeout: Duration,
|
||||
max_lsn_wal_lag: NonZeroU64,
|
||||
auth_token: Option<Arc<String>>,
|
||||
) {
|
||||
let mut etcd_client = get_etcd_client().clone();
|
||||
|
||||
@@ -69,6 +71,7 @@ pub fn spawn_connection_manager_task(
|
||||
wal_connect_timeout,
|
||||
lagging_wal_timeout,
|
||||
max_lsn_wal_lag,
|
||||
auth_token,
|
||||
);
|
||||
loop {
|
||||
select! {
|
||||
@@ -211,7 +214,7 @@ async fn connection_manager_loop_step(
|
||||
match new_state {
|
||||
// we're already active as walreceiver, no need to reactivate
|
||||
TimelineState::Active => continue,
|
||||
TimelineState::Broken | TimelineState::Paused | TimelineState::Suspended => return ControlFlow::Continue(new_state),
|
||||
TimelineState::Broken | TimelineState::Stopping | TimelineState::Suspended => return ControlFlow::Continue(new_state),
|
||||
}
|
||||
}
|
||||
Err(_sender_dropped_error) => return ControlFlow::Break(()),
|
||||
@@ -247,7 +250,7 @@ async fn connection_manager_loop_step(
|
||||
walreceiver_state
|
||||
.change_connection(
|
||||
new_candidate.safekeeper_id,
|
||||
new_candidate.wal_source_connstr,
|
||||
new_candidate.wal_source_connconf,
|
||||
)
|
||||
.await
|
||||
}
|
||||
@@ -359,6 +362,7 @@ struct WalreceiverState {
|
||||
wal_connection_retries: HashMap<NodeId, RetryInfo>,
|
||||
/// Data about all timelines, available for connection, fetched from etcd, grouped by their corresponding safekeeper node id.
|
||||
wal_stream_candidates: HashMap<NodeId, EtcdSkTimeline>,
|
||||
auth_token: Option<Arc<String>>,
|
||||
}
|
||||
|
||||
/// Current connection data.
|
||||
@@ -407,6 +411,7 @@ impl WalreceiverState {
|
||||
wal_connect_timeout: Duration,
|
||||
lagging_wal_timeout: Duration,
|
||||
max_lsn_wal_lag: NonZeroU64,
|
||||
auth_token: Option<Arc<String>>,
|
||||
) -> Self {
|
||||
let id = TenantTimelineId {
|
||||
tenant_id: timeline.tenant_id,
|
||||
@@ -421,11 +426,16 @@ impl WalreceiverState {
|
||||
wal_connection: None,
|
||||
wal_stream_candidates: HashMap::new(),
|
||||
wal_connection_retries: HashMap::new(),
|
||||
auth_token,
|
||||
}
|
||||
}
|
||||
|
||||
/// Shuts down the current connection (if any) and immediately starts another one with the given connection string.
|
||||
async fn change_connection(&mut self, new_sk_id: NodeId, new_wal_source_connstr: String) {
|
||||
async fn change_connection(
|
||||
&mut self,
|
||||
new_sk_id: NodeId,
|
||||
new_wal_source_connconf: PgConnectionConfig,
|
||||
) {
|
||||
self.drop_old_connection(true).await;
|
||||
|
||||
let id = self.id;
|
||||
@@ -435,7 +445,7 @@ impl WalreceiverState {
|
||||
async move {
|
||||
super::walreceiver_connection::handle_walreceiver_connection(
|
||||
timeline,
|
||||
new_wal_source_connstr,
|
||||
new_wal_source_connconf,
|
||||
events_sender,
|
||||
cancellation,
|
||||
connect_timeout,
|
||||
@@ -575,7 +585,7 @@ impl WalreceiverState {
|
||||
Some(existing_wal_connection) => {
|
||||
let connected_sk_node = existing_wal_connection.sk_id;
|
||||
|
||||
let (new_sk_id, new_safekeeper_etcd_data, new_wal_source_connstr) =
|
||||
let (new_sk_id, new_safekeeper_etcd_data, new_wal_source_connconf) =
|
||||
self.select_connection_candidate(Some(connected_sk_node))?;
|
||||
|
||||
let now = Utc::now().naive_utc();
|
||||
@@ -586,7 +596,7 @@ impl WalreceiverState {
|
||||
if latest_interaciton > self.wal_connect_timeout {
|
||||
return Some(NewWalConnectionCandidate {
|
||||
safekeeper_id: new_sk_id,
|
||||
wal_source_connstr: new_wal_source_connstr,
|
||||
wal_source_connconf: new_wal_source_connconf,
|
||||
reason: ReconnectReason::NoKeepAlives {
|
||||
last_keep_alive: Some(
|
||||
existing_wal_connection.status.latest_connection_update,
|
||||
@@ -611,7 +621,7 @@ impl WalreceiverState {
|
||||
if new_sk_lsn_advantage >= self.max_lsn_wal_lag.get() {
|
||||
return Some(NewWalConnectionCandidate {
|
||||
safekeeper_id: new_sk_id,
|
||||
wal_source_connstr: new_wal_source_connstr,
|
||||
wal_source_connconf: new_wal_source_connconf,
|
||||
reason: ReconnectReason::LaggingWal {
|
||||
current_commit_lsn,
|
||||
new_commit_lsn,
|
||||
@@ -685,7 +695,7 @@ impl WalreceiverState {
|
||||
{
|
||||
return Some(NewWalConnectionCandidate {
|
||||
safekeeper_id: new_sk_id,
|
||||
wal_source_connstr: new_wal_source_connstr,
|
||||
wal_source_connconf: new_wal_source_connconf,
|
||||
reason: ReconnectReason::NoWalTimeout {
|
||||
current_lsn,
|
||||
current_commit_lsn,
|
||||
@@ -704,11 +714,11 @@ impl WalreceiverState {
|
||||
self.wal_connection.as_mut().unwrap().discovered_new_wal = discovered_new_wal;
|
||||
}
|
||||
None => {
|
||||
let (new_sk_id, _, new_wal_source_connstr) =
|
||||
let (new_sk_id, _, new_wal_source_connconf) =
|
||||
self.select_connection_candidate(None)?;
|
||||
return Some(NewWalConnectionCandidate {
|
||||
safekeeper_id: new_sk_id,
|
||||
wal_source_connstr: new_wal_source_connstr,
|
||||
wal_source_connconf: new_wal_source_connconf,
|
||||
reason: ReconnectReason::NoExistingConnection,
|
||||
});
|
||||
}
|
||||
@@ -726,7 +736,7 @@ impl WalreceiverState {
|
||||
fn select_connection_candidate(
|
||||
&self,
|
||||
node_to_omit: Option<NodeId>,
|
||||
) -> Option<(NodeId, &SkTimelineInfo, String)> {
|
||||
) -> Option<(NodeId, &SkTimelineInfo, PgConnectionConfig)> {
|
||||
self.applicable_connection_candidates()
|
||||
.filter(|&(sk_id, _, _)| Some(sk_id) != node_to_omit)
|
||||
.max_by_key(|(_, info, _)| info.commit_lsn)
|
||||
@@ -736,7 +746,7 @@ impl WalreceiverState {
|
||||
/// Some safekeepers are filtered by the retry cooldown.
|
||||
fn applicable_connection_candidates(
|
||||
&self,
|
||||
) -> impl Iterator<Item = (NodeId, &SkTimelineInfo, String)> {
|
||||
) -> impl Iterator<Item = (NodeId, &SkTimelineInfo, PgConnectionConfig)> {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
self.wal_stream_candidates
|
||||
@@ -754,9 +764,13 @@ impl WalreceiverState {
|
||||
})
|
||||
.filter_map(|(sk_id, etcd_info)| {
|
||||
let info = &etcd_info.timeline;
|
||||
match wal_stream_connection_string(
|
||||
match wal_stream_connection_config(
|
||||
self.id,
|
||||
info.safekeeper_connstr.as_deref()?,
|
||||
match &self.auth_token {
|
||||
None => None,
|
||||
Some(x) => Some(x),
|
||||
},
|
||||
) {
|
||||
Ok(connstr) => Some((*sk_id, info, connstr)),
|
||||
Err(e) => {
|
||||
@@ -797,10 +811,12 @@ impl WalreceiverState {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
#[derive(Debug)]
|
||||
struct NewWalConnectionCandidate {
|
||||
safekeeper_id: NodeId,
|
||||
wal_source_connstr: String,
|
||||
wal_source_connconf: PgConnectionConfig,
|
||||
// This field is used in `derive(Debug)` only.
|
||||
#[allow(dead_code)]
|
||||
reason: ReconnectReason,
|
||||
}
|
||||
|
||||
@@ -828,39 +844,36 @@ enum ReconnectReason {
},
}

fn wal_stream_connection_string(
fn wal_stream_connection_config(
TenantTimelineId {
tenant_id,
timeline_id,
}: TenantTimelineId,
listen_pg_addr_str: &str,
) -> anyhow::Result<String> {
let sk_connstr = format!("postgresql://no_user@{listen_pg_addr_str}/no_db");
sk_connstr
.parse()
.context("bad url")
.and_then(|url: url::Url| {
let host = url.host_str().context("host is missing")?;
let port = url.port().unwrap_or(5432); // default PG port

Ok(format!(
"host={host} \
port={port} \
options='-c timeline_id={timeline_id} tenant_id={tenant_id}'"
))
})
.with_context(|| format!("Failed to parse pageserver connection URL '{sk_connstr}'"))
auth_token: Option<&str>,
) -> anyhow::Result<PgConnectionConfig> {
let (host, port) =
parse_host_port(&listen_pg_addr_str).context("Unable to parse listen_pg_addr_str")?;
let port = port.unwrap_or(5432);
Ok(PgConnectionConfig::new_host_port(host, port)
.extend_options([
"-c".to_owned(),
format!("timeline_id={}", timeline_id),
format!("tenant_id={}", tenant_id),
])
.set_password(auth_token.map(|s| s.to_owned())))
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::tenant::harness::{TenantHarness, TIMELINE_ID};
|
||||
use url::Host;
|
||||
|
||||
#[test]
|
||||
fn no_connection_no_candidate() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn no_connection_no_candidate() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("no_connection_no_candidate")?;
|
||||
let mut state = dummy_state(&harness);
|
||||
let mut state = dummy_state(&harness).await;
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
let lagging_wal_timeout = chrono::Duration::from_std(state.lagging_wal_timeout)?;
|
||||
@@ -951,7 +964,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn connection_no_candidate() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("connection_no_candidate")?;
|
||||
let mut state = dummy_state(&harness);
|
||||
let mut state = dummy_state(&harness).await;
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
let connected_sk_id = NodeId(0);
|
||||
@@ -992,7 +1005,7 @@ mod tests {
|
||||
peer_horizon_lsn: None,
|
||||
local_start_lsn: None,
|
||||
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()),
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()),
|
||||
},
|
||||
etcd_version: 0,
|
||||
latest_update: now,
|
||||
@@ -1045,10 +1058,10 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_connection_candidate() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn no_connection_candidate() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("no_connection_candidate")?;
|
||||
let mut state = dummy_state(&harness);
|
||||
let mut state = dummy_state(&harness).await;
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
state.wal_connection = None;
|
||||
@@ -1064,7 +1077,7 @@ mod tests {
|
||||
peer_horizon_lsn: None,
|
||||
local_start_lsn: None,
|
||||
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()),
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()),
|
||||
},
|
||||
etcd_version: 0,
|
||||
latest_update: now,
|
||||
@@ -1080,9 +1093,10 @@ mod tests {
|
||||
ReconnectReason::NoExistingConnection,
|
||||
"Should select new safekeeper due to missing connection, even if there's also a lag in the wal over the threshold"
|
||||
);
|
||||
assert!(only_candidate
|
||||
.wal_source_connstr
|
||||
.contains(DUMMY_SAFEKEEPER_CONNSTR));
|
||||
assert_eq!(
|
||||
only_candidate.wal_source_connconf.host(),
|
||||
&Host::Domain(DUMMY_SAFEKEEPER_HOST.to_owned())
|
||||
);
|
||||
|
||||
let selected_lsn = 100_000;
|
||||
state.wal_stream_candidates = HashMap::from([
|
||||
@@ -1116,7 +1130,7 @@ mod tests {
|
||||
peer_horizon_lsn: None,
|
||||
local_start_lsn: None,
|
||||
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()),
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()),
|
||||
},
|
||||
etcd_version: 0,
|
||||
latest_update: now,
|
||||
@@ -1151,9 +1165,10 @@ mod tests {
|
||||
ReconnectReason::NoExistingConnection,
|
||||
"Should select new safekeeper due to missing connection, even if there's also a lag in the wal over the threshold"
|
||||
);
|
||||
assert!(biggest_wal_candidate
|
||||
.wal_source_connstr
|
||||
.contains(DUMMY_SAFEKEEPER_CONNSTR));
|
||||
assert_eq!(
|
||||
biggest_wal_candidate.wal_source_connconf.host(),
|
||||
&Host::Domain(DUMMY_SAFEKEEPER_HOST.to_owned())
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1161,7 +1176,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn candidate_with_many_connection_failures() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("candidate_with_many_connection_failures")?;
|
||||
let mut state = dummy_state(&harness);
|
||||
let mut state = dummy_state(&harness).await;
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
let current_lsn = Lsn(100_000).align();
|
||||
@@ -1181,7 +1196,7 @@ mod tests {
|
||||
peer_horizon_lsn: None,
|
||||
local_start_lsn: None,
|
||||
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()),
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()),
|
||||
},
|
||||
etcd_version: 0,
|
||||
latest_update: now,
|
||||
@@ -1199,7 +1214,7 @@ mod tests {
|
||||
peer_horizon_lsn: None,
|
||||
local_start_lsn: None,
|
||||
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()),
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()),
|
||||
},
|
||||
etcd_version: 0,
|
||||
latest_update: now,
|
||||
@@ -1229,7 +1244,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn lsn_wal_over_threshhold_current_candidate() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("lsn_wal_over_threshcurrent_candidate")?;
|
||||
let mut state = dummy_state(&harness);
|
||||
let mut state = dummy_state(&harness).await;
|
||||
let current_lsn = Lsn(100_000).align();
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
@@ -1270,7 +1285,7 @@ mod tests {
|
||||
peer_horizon_lsn: None,
|
||||
local_start_lsn: None,
|
||||
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()),
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()),
|
||||
},
|
||||
etcd_version: 0,
|
||||
latest_update: now,
|
||||
@@ -1310,9 +1325,10 @@ mod tests {
|
||||
},
|
||||
"Should select bigger WAL safekeeper if it starts to lag enough"
|
||||
);
|
||||
assert!(over_threshcurrent_candidate
|
||||
.wal_source_connstr
|
||||
.contains("advanced_by_lsn_safekeeper"));
|
||||
assert_eq!(
|
||||
over_threshcurrent_candidate.wal_source_connconf.host(),
|
||||
&Host::Domain("advanced_by_lsn_safekeeper".to_owned())
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1320,7 +1336,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn timeout_connection_threshhold_current_candidate() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("timeout_connection_threshhold_current_candidate")?;
|
||||
let mut state = dummy_state(&harness);
|
||||
let mut state = dummy_state(&harness).await;
|
||||
let current_lsn = Lsn(100_000).align();
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
@@ -1361,7 +1377,7 @@ mod tests {
|
||||
peer_horizon_lsn: None,
|
||||
local_start_lsn: None,
|
||||
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()),
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()),
|
||||
},
|
||||
etcd_version: 0,
|
||||
latest_update: now,
|
||||
@@ -1384,9 +1400,10 @@ mod tests {
|
||||
}
|
||||
unexpected => panic!("Unexpected reason: {unexpected:?}"),
|
||||
}
|
||||
assert!(over_threshcurrent_candidate
|
||||
.wal_source_connstr
|
||||
.contains(DUMMY_SAFEKEEPER_CONNSTR));
|
||||
assert_eq!(
|
||||
over_threshcurrent_candidate.wal_source_connconf.host(),
|
||||
&Host::Domain(DUMMY_SAFEKEEPER_HOST.to_owned())
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1394,7 +1411,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn timeout_wal_over_threshhold_current_candidate() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("timeout_wal_over_threshhold_current_candidate")?;
|
||||
let mut state = dummy_state(&harness);
|
||||
let mut state = dummy_state(&harness).await;
|
||||
let current_lsn = Lsn(100_000).align();
|
||||
let new_lsn = Lsn(100_100).align();
|
||||
let now = Utc::now().naive_utc();
|
||||
@@ -1434,7 +1451,7 @@ mod tests {
|
||||
peer_horizon_lsn: None,
|
||||
local_start_lsn: None,
|
||||
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()),
|
||||
safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()),
|
||||
},
|
||||
etcd_version: 0,
|
||||
latest_update: now,
|
||||
@@ -1463,16 +1480,17 @@ mod tests {
|
||||
}
|
||||
unexpected => panic!("Unexpected reason: {unexpected:?}"),
|
||||
}
|
||||
assert!(over_threshcurrent_candidate
|
||||
.wal_source_connstr
|
||||
.contains(DUMMY_SAFEKEEPER_CONNSTR));
|
||||
assert_eq!(
|
||||
over_threshcurrent_candidate.wal_source_connconf.host(),
|
||||
&Host::Domain(DUMMY_SAFEKEEPER_HOST.to_owned())
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
const DUMMY_SAFEKEEPER_CONNSTR: &str = "safekeeper_connstr";
const DUMMY_SAFEKEEPER_HOST: &str = "safekeeper_connstr";

fn dummy_state(harness: &TenantHarness<'_>) -> WalreceiverState {
async fn dummy_state(harness: &TenantHarness<'_>) -> WalreceiverState {
WalreceiverState {
|
||||
id: TenantTimelineId {
|
||||
tenant_id: harness.tenant_id,
|
||||
@@ -1480,6 +1498,7 @@ mod tests {
|
||||
},
|
||||
timeline: harness
|
||||
.load()
|
||||
.await
|
||||
.create_empty_timeline(TIMELINE_ID, Lsn(0), crate::DEFAULT_PG_VERSION)
|
||||
.expect("Failed to create an empty timeline for dummy wal connection manager")
|
||||
.initialize()
|
||||
@@ -1490,6 +1509,7 @@ mod tests {
|
||||
wal_connection: None,
|
||||
wal_stream_candidates: HashMap::new(),
|
||||
wal_connection_retries: HashMap::new(),
|
||||
auth_token: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,13 +26,13 @@ use crate::{
|
||||
task_mgr::TaskKind,
|
||||
task_mgr::WALRECEIVER_RUNTIME,
|
||||
tenant::{Timeline, WalReceiverInfo},
|
||||
tenant_mgr,
|
||||
walingest::WalIngest,
|
||||
walrecord::DecodedWALRecord,
|
||||
};
|
||||
use postgres_connection::PgConnectionConfig;
|
||||
use postgres_ffi::waldecoder::WalStreamDecoder;
|
||||
use pq_proto::ReplicationFeedback;
|
||||
use utils::{id::TenantTimelineId, lsn::Lsn};
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
/// Status of the connection.
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -56,22 +56,23 @@ pub struct WalConnectionStatus {
|
||||
/// messages as we go.
|
||||
pub async fn handle_walreceiver_connection(
|
||||
timeline: Arc<Timeline>,
|
||||
wal_source_connstr: String,
|
||||
wal_source_connconf: PgConnectionConfig,
|
||||
events_sender: watch::Sender<TaskStateUpdate<WalConnectionStatus>>,
|
||||
mut cancellation: watch::Receiver<()>,
|
||||
connect_timeout: Duration,
|
||||
) -> anyhow::Result<()> {
|
||||
// Connect to the database in replication mode.
|
||||
info!("connecting to {wal_source_connstr}");
|
||||
let connect_cfg = format!("{wal_source_connstr} application_name=pageserver replication=true");
|
||||
info!("connecting to {wal_source_connconf:?}");
|
||||
|
||||
let (mut replication_client, connection) = time::timeout(
|
||||
connect_timeout,
|
||||
tokio_postgres::connect(&connect_cfg, postgres::NoTls),
|
||||
)
|
||||
.await
|
||||
.context("Timed out while waiting for walreceiver connection to open")?
|
||||
.context("Failed to open walreceiver connection")?;
|
||||
let (mut replication_client, connection) = {
|
||||
let mut config = wal_source_connconf.to_tokio_postgres_config();
|
||||
config.application_name("pageserver");
|
||||
config.replication_mode(tokio_postgres::config::ReplicationMode::Physical);
|
||||
time::timeout(connect_timeout, config.connect(postgres::NoTls))
|
||||
.await
|
||||
.context("Timed out while waiting for walreceiver connection to open")?
|
||||
.context("Failed to open walreceiver connection")?
|
||||
};
|
||||
|
||||
info!("connected!");
|
||||
let mut connection_status = WalConnectionStatus {
|
||||
@@ -139,10 +140,6 @@ pub async fn handle_walreceiver_connection(
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let tenant_id = timeline.tenant_id;
|
||||
let timeline_id = timeline.timeline_id;
|
||||
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
|
||||
|
||||
//
|
||||
// Start streaming the WAL, from where we left off previously.
|
||||
//
|
||||
@@ -291,19 +288,8 @@ pub async fn handle_walreceiver_connection(
|
||||
})?;
|
||||
|
||||
if let Some(last_lsn) = status_update {
|
||||
let remote_index = tenant.get_remote_index();
|
||||
let timeline_remote_consistent_lsn = remote_index
|
||||
.read()
|
||||
.await
|
||||
// here we either do not have this timeline in remote index
|
||||
// or there were no checkpoints for it yet
|
||||
.timeline_entry(&TenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
})
|
||||
.map(|remote_timeline| remote_timeline.metadata.disk_consistent_lsn())
|
||||
// no checkpoint was uploaded
|
||||
.unwrap_or(Lsn(0));
|
||||
let timeline_remote_consistent_lsn =
|
||||
timeline.get_remote_consistent_lsn().unwrap_or(Lsn(0));
|
||||
|
||||
// The last LSN we processed. It is not guaranteed to survive pageserver crash.
|
||||
let write_lsn = u64::from(last_lsn);
|
||||
@@ -316,7 +302,7 @@ pub async fn handle_walreceiver_connection(
|
||||
|
||||
// Update the status about what we just received. This is shown in the mgmt API.
|
||||
let last_received_wal = WalReceiverInfo {
|
||||
wal_source_connstr: wal_source_connstr.to_owned(),
|
||||
wal_source_connconf: wal_source_connconf.clone(),
|
||||
last_received_msg_lsn: last_lsn,
|
||||
last_received_msg_ts: ts
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
|
||||
@@ -44,6 +44,7 @@ PGconn *pageserver_conn = NULL;
|
||||
WaitEventSet *pageserver_conn_wes = NULL;
|
||||
|
||||
char *page_server_connstring_raw;
|
||||
char *safekeeper_token_env;
|
||||
|
||||
int n_unflushed_requests = 0;
|
||||
int flush_every_n_requests = 8;
|
||||
@@ -418,6 +419,15 @@ pg_init_libpagestore(void)
|
||||
0, /* no flags required */
|
||||
NULL, NULL, NULL);
|
||||
|
||||
DefineCustomStringVariable("neon.safekeeper_token_env",
|
||||
"the environment variable containing JWT token for authentication with Safekeepers, the convention is to either unset or set to $ZENITH_AUTH_TOKEN",
|
||||
NULL,
|
||||
&safekeeper_token_env,
|
||||
NULL,
|
||||
PGC_POSTMASTER,
|
||||
0, /* no flags required */
|
||||
NULL, NULL, NULL);
|
||||
|
||||
DefineCustomStringVariable("neon.timeline_id",
|
||||
"Neon timeline_id the server is running on",
|
||||
NULL,
|
||||
@@ -481,6 +491,24 @@ pg_init_libpagestore(void)
|
||||
neon_timeline_walproposer = neon_timeline;
|
||||
neon_tenant_walproposer = neon_tenant;
|
||||
|
||||
/* retrieve the token for Safekeeper, if present */
|
||||
if (safekeeper_token_env != NULL) {
|
||||
if (safekeeper_token_env[0] != '$') {
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_CONNECTION_EXCEPTION),
|
||||
errmsg("expected safekeeper auth token environment variable's name starting with $ but found: %s",
|
||||
safekeeper_token_env)));
|
||||
}
|
||||
neon_safekeeper_token_walproposer = getenv(&safekeeper_token_env[1]);
|
||||
if (!neon_safekeeper_token_walproposer) {
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_CONNECTION_EXCEPTION),
|
||||
errmsg("cannot get safekeeper auth token, environment variable %s is not set",
|
||||
&safekeeper_token_env[1])));
|
||||
}
|
||||
neon_log(LOG, "using safekeeper auth token from environment variable");
|
||||
}
|
||||
|
||||
if (page_server_connstring && page_server_connstring[0])
|
||||
{
|
||||
neon_log(PageStoreTrace, "set neon_smgr hook");
|
||||
|
||||
@@ -267,7 +267,7 @@ readahead_buffer_resize(int newsize, void *extra)
|
||||
nfree = newsize;
|
||||
PrefetchState *newPState;
|
||||
Size newprfs_size = offsetof(PrefetchState, prf_buffer) + (
|
||||
sizeof(PrefetchRequest) * readahead_buffer_size
|
||||
sizeof(PrefetchRequest) * newsize
|
||||
);
|
||||
|
||||
/* don't try to re-initialize if we haven't initialized yet */
|
||||
@@ -1780,6 +1780,17 @@ neon_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno,
|
||||
&request_lsn);
|
||||
slot = GetPrfSlot(ring_index);
|
||||
}
|
||||
else
|
||||
{
|
||||
/*
|
||||
* Empty our reference to the prefetch buffer's hash entry.
|
||||
* When we wait for prefetches, the entry reference is invalidated by
|
||||
* potential updates to the hash, and when we reconnect to the
|
||||
* pageserver the prefetch we're waiting for may be dropped,
|
||||
* in which case we need to retry and take the branch above.
|
||||
*/
|
||||
entry = NULL;
|
||||
}
|
||||
|
||||
Assert(slot->my_ring_index == ring_index);
|
||||
Assert(MyPState->ring_last <= ring_index &&
|
||||
|
||||
@@ -80,6 +80,7 @@ bool am_wal_proposer;
|
||||
|
||||
char *neon_timeline_walproposer = NULL;
|
||||
char *neon_tenant_walproposer = NULL;
|
||||
char *neon_safekeeper_token_walproposer = NULL;
|
||||
|
||||
#define WAL_PROPOSER_SLOT_NAME "wal_proposer_slot"
|
||||
|
||||
@@ -509,11 +510,25 @@ WalProposerInit(XLogRecPtr flushRecPtr, uint64 systemId)
|
||||
safekeeper[n_safekeepers].state = SS_OFFLINE;
|
||||
safekeeper[n_safekeepers].conn = NULL;
|
||||
|
||||
/*
|
||||
* Set conninfo to empty. We'll fill it out once later, in
|
||||
* `ResetConnection` as needed
|
||||
*/
|
||||
safekeeper[n_safekeepers].conninfo[0] = '\0';
|
||||
{
|
||||
Safekeeper *sk = &safekeeper[n_safekeepers];
|
||||
int written = 0;
|
||||
|
||||
if (neon_safekeeper_token_walproposer != NULL) {
|
||||
written = snprintf((char *) &sk->conninfo, MAXCONNINFO,
|
||||
"host=%s port=%s password=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'",
|
||||
sk->host, sk->port, neon_safekeeper_token_walproposer, neon_timeline_walproposer,
|
||||
neon_tenant_walproposer);
|
||||
} else {
|
||||
written = snprintf((char *) &sk->conninfo, MAXCONNINFO,
|
||||
"host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'",
|
||||
sk->host, sk->port, neon_timeline_walproposer, neon_tenant_walproposer);
|
||||
}
|
||||
|
||||
if (written > MAXCONNINFO || written < 0)
|
||||
elog(FATAL, "could not create connection string for safekeeper %s:%s", sk->host, sk->port);
|
||||
}
|
||||
|
||||
initStringInfo(&safekeeper[n_safekeepers].outbuf);
|
||||
safekeeper[n_safekeepers].xlogreader = XLogReaderAllocate(wal_segment_size, NULL, XL_ROUTINE(.segment_open = wal_segment_open,.segment_close = wal_segment_close), NULL);
|
||||
if (safekeeper[n_safekeepers].xlogreader == NULL)
|
||||
@@ -684,31 +699,7 @@ ResetConnection(Safekeeper *sk)
|
||||
|
||||
/*
|
||||
* Try to establish new connection
|
||||
*
|
||||
* If the connection information hasn't been filled out, we need to do
|
||||
* that here.
|
||||
*/
|
||||
if (sk->conninfo[0] == '\0')
|
||||
{
|
||||
int written = 0;
|
||||
|
||||
written = snprintf((char *) &sk->conninfo, MAXCONNINFO,
|
||||
"host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'",
|
||||
sk->host, sk->port, neon_timeline_walproposer, neon_tenant_walproposer);
|
||||
|
||||
/*
|
||||
* currently connection string is not that long, but once we pass
|
||||
* something like jwt we might overflow the buffer,
|
||||
*/
|
||||
|
||||
/*
|
||||
* so it is better to be defensive and check that everything aligns
|
||||
* well
|
||||
*/
|
||||
if (written > MAXCONNINFO || written < 0)
|
||||
elog(FATAL, "could not create connection string for safekeeper %s:%s", sk->host, sk->port);
|
||||
}
|
||||
|
||||
sk->conn = walprop_connect_start((char *) &sk->conninfo);
|
||||
|
||||
/*
|
||||
@@ -729,12 +720,13 @@ ResetConnection(Safekeeper *sk)
* According to libpq docs:
* "If the result is CONNECTION_BAD, the connection attempt has already failed,
* typically because of invalid connection parameters."
* We should report this failure.
* We should report this failure. Do not print the exact `conninfo` as it may
* contain e.g. password. The error message should already provide enough information.
*
* https://www.postgresql.org/docs/devel/libpq-connect.html#LIBPQ-PQCONNECTSTARTPARAMS
*/
elog(WARNING, "Immediate failure to connect with node:\n\t%s\n\terror: %s",
sk->conninfo, walprop_error_message(sk->conn));
elog(WARNING, "Immediate failure to connect with node '%s:%s':\n\terror: %s",
sk->host, sk->port, walprop_error_message(sk->conn));
|
||||
|
||||
/*
|
||||
* Even though the connection failed, we still need to clean up the
|
||||
@@ -1403,14 +1395,11 @@ DetermineEpochStartLsn(void)
|
||||
static bool
|
||||
WalProposerRecovery(int donor, TimeLineID timeline, XLogRecPtr startpos, XLogRecPtr endpos)
|
||||
{
|
||||
char conninfo[MAXCONNINFO];
|
||||
char *err;
|
||||
WalReceiverConn *wrconn;
|
||||
WalRcvStreamOptions options;
|
||||
|
||||
sprintf(conninfo, "host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'",
|
||||
safekeeper[donor].host, safekeeper[donor].port, neon_timeline_walproposer, neon_tenant_walproposer);
|
||||
wrconn = walrcv_connect(conninfo, false, "wal_proposer_recovery", &err);
|
||||
wrconn = walrcv_connect(safekeeper[donor].conninfo, false, "wal_proposer_recovery", &err);
|
||||
if (!wrconn)
|
||||
{
|
||||
ereport(WARNING,
|
||||
|
||||
@@ -41,6 +41,7 @@ typedef struct WalMessage WalMessage;
|
||||
|
||||
extern char *neon_timeline_walproposer;
|
||||
extern char *neon_tenant_walproposer;
|
||||
extern char *neon_safekeeper_token_walproposer;
|
||||
|
||||
/* Possible return values from ReadPGAsync */
|
||||
typedef enum
|
||||
@@ -337,8 +338,13 @@ typedef struct Safekeeper
{
char const *host;
char const *port;
char conninfo[MAXCONNINFO]; /* connection info for*
* connecting/reconnecting */

/*
* connection string for connecting/reconnecting.
*
* May contain private information like password and should not be logged.
*/
char conninfo[MAXCONNINFO];
|
||||
|
||||
/*
|
||||
* postgres protocol connection to the WAL acceptor
|
||||
|
||||
poetry.lock: 765 lines changed (generated file; diff suppressed because one or more lines are too long)
@@ -11,14 +11,15 @@ pub enum ClientCredsParseError {
#[error("Parameter '{0}' is missing in startup packet.")]
MissingKey(&'static str),

#[error("Inconsistent project name inferred from SNI ('{0}') and project option ('{1}').")]
InconsistentProjectNames(String, String),
#[error("Inconsistent project name inferred from SNI ('{}') and project option ('{}').", .domain, .option)]
InconsistentProjectNames { domain: String, option: String },

#[error(
"SNI ('{1}') inconsistently formatted with respect to common name ('{0}'). \
SNI should be formatted as '<project-name>.{0}'."
"SNI ('{}') inconsistently formatted with respect to common name ('{}'). \
SNI should be formatted as '<project-name>.{}'.",
.sni, .cn, .cn,
)]
InconsistentSni(String, String),
InconsistentSni { sni: String, cn: String },

#[error("Project name ('{0}') must contain only alphanumeric characters and hyphen.")]
MalformedProjectName(String),
|
||||
@@ -67,7 +68,7 @@ impl<'a> ClientCredentials<'a> {
|
||||
let dbname = get_param("database")?;
|
||||
|
||||
// Project name might be passed via PG's command-line options.
|
||||
let project_a = params.options_raw().and_then(|mut options| {
|
||||
let project_option = params.options_raw().and_then(|mut options| {
|
||||
options
|
||||
.find_map(|opt| opt.strip_prefix("project="))
|
||||
.map(Cow::Borrowed)
|
||||
@@ -75,18 +76,26 @@ impl<'a> ClientCredentials<'a> {
|
||||
|
||||
// Alternative project name is in fact a subdomain from SNI.
|
||||
// NOTE: we do not consider SNI if `common_name` is missing.
|
||||
let project_b = sni
|
||||
let project_domain = sni
|
||||
.zip(common_name)
|
||||
.map(|(sni, cn)| {
|
||||
subdomain_from_sni(sni, cn)
|
||||
.ok_or_else(|| InconsistentSni(sni.into(), cn.into()))
|
||||
.ok_or_else(|| InconsistentSni {
|
||||
sni: sni.into(),
|
||||
cn: cn.into(),
|
||||
})
|
||||
.map(Cow::<'static, str>::Owned)
|
||||
})
|
||||
.transpose()?;
|
||||
|
||||
let project = match (project_a, project_b) {
|
||||
let project = match (project_option, project_domain) {
|
||||
// Invariant: if we have both project name variants, they should match.
|
||||
(Some(a), Some(b)) if a != b => Some(Err(InconsistentProjectNames(a.into(), b.into()))),
|
||||
(Some(option), Some(domain)) if option != domain => {
|
||||
Some(Err(InconsistentProjectNames {
|
||||
domain: domain.into(),
|
||||
option: option.into(),
|
||||
}))
|
||||
}
|
||||
// Invariant: project name may not contain certain characters.
|
||||
(a, b) => a.or(b).map(|name| match project_name_valid(&name) {
|
||||
false => Err(MalformedProjectName(name.into())),
|
||||
@@ -123,6 +132,7 @@ fn subdomain_from_sni(sni: &str, common_name: &str) -> Option<String> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use ClientCredsParseError::*;
|
||||
|
||||
#[test]
|
||||
#[ignore = "TODO: fix how database is handled"]
|
||||
@@ -210,9 +220,30 @@ mod tests {
|
||||
let sni = Some("second.localhost");
|
||||
let common_name = Some("localhost");
|
||||
|
||||
assert!(matches!(
|
||||
ClientCredentials::parse(&options, sni, common_name).expect_err("should fail"),
|
||||
ClientCredsParseError::InconsistentProjectNames(_, _)
|
||||
));
|
||||
let err = ClientCredentials::parse(&options, sni, common_name).expect_err("should fail");
|
||||
match err {
|
||||
InconsistentProjectNames { domain, option } => {
|
||||
assert_eq!(option, "first");
|
||||
assert_eq!(domain, "second");
|
||||
}
|
||||
_ => panic!("bad error: {err:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_inconsistent_sni() {
|
||||
let options = StartupMessageParams::new([("user", "john_doe"), ("database", "world")]);
|
||||
|
||||
let sni = Some("project.localhost");
|
||||
let common_name = Some("example.com");
|
||||
|
||||
let err = ClientCredentials::parse(&options, sni, common_name).expect_err("should fail");
|
||||
match err {
|
||||
InconsistentSni { sni, cn } => {
|
||||
assert_eq!(sni, "project.localhost");
|
||||
assert_eq!(cn, "example.com");
|
||||
}
|
||||
_ => panic!("bad error: {err:?}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,12 @@
|
||||
use crate::auth;
|
||||
use crate::cancellation::{self, CancelMap};
|
||||
use crate::config::{ProxyConfig, TlsConfig};
|
||||
use crate::stream::{MeasuredStream, PqStream, Stream};
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use crate::{
|
||||
auth,
|
||||
cancellation::{self, CancelMap},
|
||||
config::{ProxyConfig, TlsConfig},
|
||||
stream::{MeasuredStream, PqStream, Stream},
|
||||
};
|
||||
use anyhow::{bail, Context};
|
||||
use futures::TryFutureExt;
|
||||
use metrics::{register_int_counter, register_int_counter_vec, IntCounter, IntCounterVec};
|
||||
@@ -292,298 +297,3 @@ impl<S: AsyncRead + AsyncWrite + Unpin + Send> Client<'_, S> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{auth, scram};
|
||||
use async_trait::async_trait;
|
||||
use rstest::rstest;
|
||||
use tokio_postgres::config::SslMode;
|
||||
use tokio_postgres::tls::{MakeTlsConnect, NoTls};
|
||||
use tokio_postgres_rustls::MakeRustlsConnect;
|
||||
|
||||
/// Generate a set of TLS certificates: CA + server.
|
||||
fn generate_certs(
|
||||
hostname: &str,
|
||||
) -> anyhow::Result<(rustls::Certificate, rustls::Certificate, rustls::PrivateKey)> {
|
||||
let ca = rcgen::Certificate::from_params({
|
||||
let mut params = rcgen::CertificateParams::default();
|
||||
params.is_ca = rcgen::IsCa::Ca(rcgen::BasicConstraints::Unconstrained);
|
||||
params
|
||||
})?;
|
||||
|
||||
let cert = rcgen::generate_simple_self_signed(vec![hostname.into()])?;
|
||||
Ok((
|
||||
rustls::Certificate(ca.serialize_der()?),
|
||||
rustls::Certificate(cert.serialize_der_with_signer(&ca)?),
|
||||
rustls::PrivateKey(cert.serialize_private_key_der()),
|
||||
))
|
||||
}
|
||||
|
||||
struct ClientConfig<'a> {
|
||||
config: rustls::ClientConfig,
|
||||
hostname: &'a str,
|
||||
}
|
||||
|
||||
impl ClientConfig<'_> {
|
||||
fn make_tls_connect<S: AsyncRead + AsyncWrite + Unpin + Send + 'static>(
|
||||
self,
|
||||
) -> anyhow::Result<impl tokio_postgres::tls::TlsConnect<S>> {
|
||||
let mut mk = MakeRustlsConnect::new(self.config);
|
||||
let tls = MakeTlsConnect::<S>::make_tls_connect(&mut mk, self.hostname)?;
|
||||
Ok(tls)
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate TLS certificates and build rustls configs for client and server.
|
||||
fn generate_tls_config<'a>(
|
||||
hostname: &'a str,
|
||||
common_name: &'a str,
|
||||
) -> anyhow::Result<(ClientConfig<'a>, TlsConfig)> {
|
||||
let (ca, cert, key) = generate_certs(hostname)?;
|
||||
|
||||
let tls_config = {
|
||||
let config = rustls::ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(vec![cert], key)?
|
||||
.into();
|
||||
|
||||
TlsConfig {
|
||||
config,
|
||||
common_name: Some(common_name.to_string()),
|
||||
}
|
||||
};
|
||||
|
||||
let client_config = {
|
||||
let config = rustls::ClientConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_root_certificates({
|
||||
let mut store = rustls::RootCertStore::empty();
|
||||
store.add(&ca)?;
|
||||
store
|
||||
})
|
||||
.with_no_client_auth();
|
||||
|
||||
ClientConfig { config, hostname }
|
||||
};
|
||||
|
||||
Ok((client_config, tls_config))
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
trait TestAuth: Sized {
|
||||
async fn authenticate<S: AsyncRead + AsyncWrite + Unpin + Send>(
|
||||
self,
|
||||
_stream: &mut PqStream<Stream<S>>,
|
||||
) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct NoAuth;
|
||||
impl TestAuth for NoAuth {}
|
||||
|
||||
struct Scram(scram::ServerSecret);
|
||||
|
||||
impl Scram {
|
||||
fn new(password: &str) -> anyhow::Result<Self> {
|
||||
let salt = rand::random::<[u8; 16]>();
|
||||
let secret = scram::ServerSecret::build(password, &salt, 256)
|
||||
.context("failed to generate scram secret")?;
|
||||
Ok(Scram(secret))
|
||||
}
|
||||
|
||||
fn mock(user: &str) -> Self {
|
||||
let salt = rand::random::<[u8; 32]>();
|
||||
Scram(scram::ServerSecret::mock(user, &salt))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl TestAuth for Scram {
|
||||
async fn authenticate<S: AsyncRead + AsyncWrite + Unpin + Send>(
|
||||
self,
|
||||
stream: &mut PqStream<Stream<S>>,
|
||||
) -> anyhow::Result<()> {
|
||||
auth::AuthFlow::new(stream)
|
||||
.begin(auth::Scram(&self.0))
|
||||
.await?
|
||||
.authenticate()
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A dummy proxy impl which performs a handshake and reports auth success.
|
||||
async fn dummy_proxy(
|
||||
client: impl AsyncRead + AsyncWrite + Unpin + Send,
|
||||
tls: Option<TlsConfig>,
|
||||
auth: impl TestAuth + Send,
|
||||
) -> anyhow::Result<()> {
|
||||
let cancel_map = CancelMap::default();
|
||||
let (mut stream, _params) = handshake(client, tls.as_ref(), &cancel_map)
|
||||
.await?
|
||||
.context("handshake failed")?;
|
||||
|
||||
auth.authenticate(&mut stream).await?;
|
||||
|
||||
stream
|
||||
.write_message_noflush(&Be::AuthenticationOk)?
|
||||
.write_message_noflush(&BeParameterStatusMessage::encoding())?
|
||||
.write_message(&BeMessage::ReadyForQuery)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_tls_is_enforced_by_proxy() -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let (_, server_config) =
|
||||
generate_tls_config("generic-project-name.localhost", "localhost")?;
|
||||
let proxy = tokio::spawn(dummy_proxy(client, Some(server_config), NoAuth));
|
||||
|
||||
let client_err = tokio_postgres::Config::new()
|
||||
.user("john_doe")
|
||||
.dbname("earth")
|
||||
.ssl_mode(SslMode::Disable)
|
||||
.connect_raw(server, NoTls)
|
||||
.await
|
||||
.err() // -> Option<E>
|
||||
.context("client shouldn't be able to connect")?;
|
||||
|
||||
assert!(client_err.to_string().contains(ERR_INSECURE_CONNECTION));
|
||||
|
||||
let server_err = proxy
|
||||
.await?
|
||||
.err() // -> Option<E>
|
||||
.context("server shouldn't accept client")?;
|
||||
|
||||
assert!(client_err.to_string().contains(&server_err.to_string()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_tls() -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let (client_config, server_config) =
|
||||
generate_tls_config("generic-project-name.localhost", "localhost")?;
|
||||
let proxy = tokio::spawn(dummy_proxy(client, Some(server_config), NoAuth));
|
||||
|
||||
let (_client, _conn) = tokio_postgres::Config::new()
|
||||
.user("john_doe")
|
||||
.dbname("earth")
|
||||
.ssl_mode(SslMode::Require)
|
||||
.connect_raw(server, client_config.make_tls_connect()?)
|
||||
.await?;
|
||||
|
||||
proxy.await?
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_raw() -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let proxy = tokio::spawn(dummy_proxy(client, None, NoAuth));
|
||||
|
||||
let (_client, _conn) = tokio_postgres::Config::new()
|
||||
.user("john_doe")
|
||||
.dbname("earth")
|
||||
.options("project=generic-project-name")
|
||||
.ssl_mode(SslMode::Prefer)
|
||||
.connect_raw(server, NoTls)
|
||||
.await?;
|
||||
|
||||
proxy.await?
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn keepalive_is_inherited() -> anyhow::Result<()> {
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await?;
|
||||
let port = listener.local_addr()?.port();
|
||||
socket2::SockRef::from(&listener).set_keepalive(true)?;
|
||||
|
||||
let t = tokio::spawn(async move {
|
||||
let (client, _) = listener.accept().await?;
|
||||
let keepalive = socket2::SockRef::from(&client).keepalive()?;
|
||||
anyhow::Ok(keepalive)
|
||||
});
|
||||
|
||||
let _ = TcpStream::connect(("127.0.0.1", port)).await?;
|
||||
assert!(t.await??, "keepalive should be inherited");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case("password_foo")]
|
||||
#[case("pwd-bar")]
|
||||
#[case("")]
|
||||
#[tokio::test]
|
||||
async fn scram_auth_good(#[case] password: &str) -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let (client_config, server_config) =
|
||||
generate_tls_config("generic-project-name.localhost", "localhost")?;
|
||||
let proxy = tokio::spawn(dummy_proxy(
|
||||
client,
|
||||
Some(server_config),
|
||||
Scram::new(password)?,
|
||||
));
|
||||
|
||||
let (_client, _conn) = tokio_postgres::Config::new()
|
||||
.user("user")
|
||||
.dbname("db")
|
||||
.password(password)
|
||||
.ssl_mode(SslMode::Require)
|
||||
.connect_raw(server, client_config.make_tls_connect()?)
|
||||
.await?;
|
||||
|
||||
proxy.await?
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn scram_auth_mock() -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let (client_config, server_config) =
|
||||
generate_tls_config("generic-project-name.localhost", "localhost")?;
|
||||
let proxy = tokio::spawn(dummy_proxy(
|
||||
client,
|
||||
Some(server_config),
|
||||
Scram::mock("user"),
|
||||
));
|
||||
|
||||
use rand::{distributions::Alphanumeric, Rng};
|
||||
let password: String = rand::thread_rng()
|
||||
.sample_iter(&Alphanumeric)
|
||||
.take(rand::random::<u8>() as usize)
|
||||
.map(char::from)
|
||||
.collect();
|
||||
|
||||
let _client_err = tokio_postgres::Config::new()
|
||||
.user("user")
|
||||
.dbname("db")
|
||||
.password(&password) // no password will match the mocked secret
|
||||
.ssl_mode(SslMode::Require)
|
||||
.connect_raw(server, client_config.make_tls_connect()?)
|
||||
.await
|
||||
.err() // -> Option<E>
|
||||
.context("client shouldn't be able to connect")?;
|
||||
|
||||
let _server_err = proxy
|
||||
.await?
|
||||
.err() // -> Option<E>
|
||||
.context("server shouldn't accept client")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
291
proxy/src/proxy/tests.rs
Normal file
@@ -0,0 +1,291 @@
|
||||
//! A group of high-level tests for connection establishing logic and auth.
|
||||
use super::*;
|
||||
use crate::{auth, scram};
|
||||
use async_trait::async_trait;
|
||||
use rstest::rstest;
|
||||
use tokio_postgres::config::SslMode;
|
||||
use tokio_postgres::tls::{MakeTlsConnect, NoTls};
|
||||
use tokio_postgres_rustls::MakeRustlsConnect;
|
||||
|
||||
/// Generate a set of TLS certificates: CA + server.
|
||||
fn generate_certs(
|
||||
hostname: &str,
|
||||
) -> anyhow::Result<(rustls::Certificate, rustls::Certificate, rustls::PrivateKey)> {
|
||||
let ca = rcgen::Certificate::from_params({
|
||||
let mut params = rcgen::CertificateParams::default();
|
||||
params.is_ca = rcgen::IsCa::Ca(rcgen::BasicConstraints::Unconstrained);
|
||||
params
|
||||
})?;
|
||||
|
||||
let cert = rcgen::generate_simple_self_signed(vec![hostname.into()])?;
|
||||
Ok((
|
||||
rustls::Certificate(ca.serialize_der()?),
|
||||
rustls::Certificate(cert.serialize_der_with_signer(&ca)?),
|
||||
rustls::PrivateKey(cert.serialize_private_key_der()),
|
||||
))
|
||||
}
|
||||
|
||||
struct ClientConfig<'a> {
|
||||
config: rustls::ClientConfig,
|
||||
hostname: &'a str,
|
||||
}
|
||||
|
||||
impl ClientConfig<'_> {
|
||||
fn make_tls_connect<S: AsyncRead + AsyncWrite + Unpin + Send + 'static>(
|
||||
self,
|
||||
) -> anyhow::Result<impl tokio_postgres::tls::TlsConnect<S>> {
|
||||
let mut mk = MakeRustlsConnect::new(self.config);
|
||||
let tls = MakeTlsConnect::<S>::make_tls_connect(&mut mk, self.hostname)?;
|
||||
Ok(tls)
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate TLS certificates and build rustls configs for client and server.
|
||||
fn generate_tls_config<'a>(
|
||||
hostname: &'a str,
|
||||
common_name: &'a str,
|
||||
) -> anyhow::Result<(ClientConfig<'a>, TlsConfig)> {
|
||||
let (ca, cert, key) = generate_certs(hostname)?;
|
||||
|
||||
let tls_config = {
|
||||
let config = rustls::ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(vec![cert], key)?
|
||||
.into();
|
||||
|
||||
TlsConfig {
|
||||
config,
|
||||
common_name: Some(common_name.to_string()),
|
||||
}
|
||||
};
|
||||
|
||||
let client_config = {
|
||||
let config = rustls::ClientConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_root_certificates({
|
||||
let mut store = rustls::RootCertStore::empty();
|
||||
store.add(&ca)?;
|
||||
store
|
||||
})
|
||||
.with_no_client_auth();
|
||||
|
||||
ClientConfig { config, hostname }
|
||||
};
|
||||
|
||||
Ok((client_config, tls_config))
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
trait TestAuth: Sized {
|
||||
async fn authenticate<S: AsyncRead + AsyncWrite + Unpin + Send>(
|
||||
self,
|
||||
_stream: &mut PqStream<Stream<S>>,
|
||||
) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct NoAuth;
|
||||
impl TestAuth for NoAuth {}
|
||||
|
||||
struct Scram(scram::ServerSecret);
|
||||
|
||||
impl Scram {
|
||||
fn new(password: &str) -> anyhow::Result<Self> {
|
||||
let salt = rand::random::<[u8; 16]>();
|
||||
let secret = scram::ServerSecret::build(password, &salt, 256)
|
||||
.context("failed to generate scram secret")?;
|
||||
Ok(Scram(secret))
|
||||
}
|
||||
|
||||
fn mock(user: &str) -> Self {
|
||||
let salt = rand::random::<[u8; 32]>();
|
||||
Scram(scram::ServerSecret::mock(user, &salt))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl TestAuth for Scram {
|
||||
async fn authenticate<S: AsyncRead + AsyncWrite + Unpin + Send>(
|
||||
self,
|
||||
stream: &mut PqStream<Stream<S>>,
|
||||
) -> anyhow::Result<()> {
|
||||
auth::AuthFlow::new(stream)
|
||||
.begin(auth::Scram(&self.0))
|
||||
.await?
|
||||
.authenticate()
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A dummy proxy impl which performs a handshake and reports auth success.
|
||||
async fn dummy_proxy(
|
||||
client: impl AsyncRead + AsyncWrite + Unpin + Send,
|
||||
tls: Option<TlsConfig>,
|
||||
auth: impl TestAuth + Send,
|
||||
) -> anyhow::Result<()> {
|
||||
let cancel_map = CancelMap::default();
|
||||
let (mut stream, _params) = handshake(client, tls.as_ref(), &cancel_map)
|
||||
.await?
|
||||
.context("handshake failed")?;
|
||||
|
||||
auth.authenticate(&mut stream).await?;
|
||||
|
||||
stream
|
||||
.write_message_noflush(&Be::AuthenticationOk)?
|
||||
.write_message_noflush(&BeParameterStatusMessage::encoding())?
|
||||
.write_message(&BeMessage::ReadyForQuery)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_tls_is_enforced_by_proxy() -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let (_, server_config) = generate_tls_config("generic-project-name.localhost", "localhost")?;
|
||||
let proxy = tokio::spawn(dummy_proxy(client, Some(server_config), NoAuth));
|
||||
|
||||
let client_err = tokio_postgres::Config::new()
|
||||
.user("john_doe")
|
||||
.dbname("earth")
|
||||
.ssl_mode(SslMode::Disable)
|
||||
.connect_raw(server, NoTls)
|
||||
.await
|
||||
.err() // -> Option<E>
|
||||
.context("client shouldn't be able to connect")?;
|
||||
|
||||
assert!(client_err.to_string().contains(ERR_INSECURE_CONNECTION));
|
||||
|
||||
let server_err = proxy
|
||||
.await?
|
||||
.err() // -> Option<E>
|
||||
.context("server shouldn't accept client")?;
|
||||
|
||||
assert!(client_err.to_string().contains(&server_err.to_string()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_tls() -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let (client_config, server_config) =
|
||||
generate_tls_config("generic-project-name.localhost", "localhost")?;
|
||||
let proxy = tokio::spawn(dummy_proxy(client, Some(server_config), NoAuth));
|
||||
|
||||
let (_client, _conn) = tokio_postgres::Config::new()
|
||||
.user("john_doe")
|
||||
.dbname("earth")
|
||||
.ssl_mode(SslMode::Require)
|
||||
.connect_raw(server, client_config.make_tls_connect()?)
|
||||
.await?;
|
||||
|
||||
proxy.await?
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_raw() -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let proxy = tokio::spawn(dummy_proxy(client, None, NoAuth));
|
||||
|
||||
let (_client, _conn) = tokio_postgres::Config::new()
|
||||
.user("john_doe")
|
||||
.dbname("earth")
|
||||
.options("project=generic-project-name")
|
||||
.ssl_mode(SslMode::Prefer)
|
||||
.connect_raw(server, NoTls)
|
||||
.await?;
|
||||
|
||||
proxy.await?
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn keepalive_is_inherited() -> anyhow::Result<()> {
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await?;
|
||||
let port = listener.local_addr()?.port();
|
||||
socket2::SockRef::from(&listener).set_keepalive(true)?;
|
||||
|
||||
let t = tokio::spawn(async move {
|
||||
let (client, _) = listener.accept().await?;
|
||||
let keepalive = socket2::SockRef::from(&client).keepalive()?;
|
||||
anyhow::Ok(keepalive)
|
||||
});
|
||||
|
||||
let _ = TcpStream::connect(("127.0.0.1", port)).await?;
|
||||
assert!(t.await??, "keepalive should be inherited");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case("password_foo")]
|
||||
#[case("pwd-bar")]
|
||||
#[case("")]
|
||||
#[tokio::test]
|
||||
async fn scram_auth_good(#[case] password: &str) -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let (client_config, server_config) =
|
||||
generate_tls_config("generic-project-name.localhost", "localhost")?;
|
||||
let proxy = tokio::spawn(dummy_proxy(
|
||||
client,
|
||||
Some(server_config),
|
||||
Scram::new(password)?,
|
||||
));
|
||||
|
||||
let (_client, _conn) = tokio_postgres::Config::new()
|
||||
.user("user")
|
||||
.dbname("db")
|
||||
.password(password)
|
||||
.ssl_mode(SslMode::Require)
|
||||
.connect_raw(server, client_config.make_tls_connect()?)
|
||||
.await?;
|
||||
|
||||
proxy.await?
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn scram_auth_mock() -> anyhow::Result<()> {
|
||||
let (client, server) = tokio::io::duplex(1024);
|
||||
|
||||
let (client_config, server_config) =
|
||||
generate_tls_config("generic-project-name.localhost", "localhost")?;
|
||||
let proxy = tokio::spawn(dummy_proxy(
|
||||
client,
|
||||
Some(server_config),
|
||||
Scram::mock("user"),
|
||||
));
|
||||
|
||||
use rand::{distributions::Alphanumeric, Rng};
|
||||
let password: String = rand::thread_rng()
|
||||
.sample_iter(&Alphanumeric)
|
||||
.take(rand::random::<u8>() as usize)
|
||||
.map(char::from)
|
||||
.collect();
|
||||
|
||||
let _client_err = tokio_postgres::Config::new()
|
||||
.user("user")
|
||||
.dbname("db")
|
||||
.password(&password) // no password will match the mocked secret
|
||||
.ssl_mode(SslMode::Require)
|
||||
.connect_raw(server, client_config.make_tls_connect()?)
|
||||
.await
|
||||
.err() // -> Option<E>
|
||||
.context("client shouldn't be able to connect")?;
|
||||
|
||||
let _server_err = proxy
|
||||
.await?
|
||||
.err() // -> Option<E>
|
||||
.context("server shouldn't accept client")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -12,13 +12,13 @@ typing-extensions = "^4.1.0"
|
||||
PyJWT = {version = "^2.1.0", extras = ["crypto"]}
|
||||
requests = "^2.26.0"
|
||||
pytest-xdist = "^3.0.2"
|
||||
asyncpg = "^0.24.0"
|
||||
asyncpg = "^0.27.0"
|
||||
aiopg = "^1.3.1"
|
||||
Jinja2 = "^3.0.2"
|
||||
types-requests = "^2.28.5"
|
||||
types-psycopg2 = "^2.9.18"
|
||||
boto3 = "^1.20.40"
|
||||
boto3-stubs = {version = "^1.23.38", extras = ["s3"]}
|
||||
boto3 = "^1.26.16"
|
||||
boto3-stubs = {extras = ["s3"], version = "^1.26.16"}
|
||||
moto = {version = "^3.0.0", extras = ["server"]}
|
||||
backoff = "^1.11.1"
|
||||
pytest-lazy-fixture = "^0.6.3"
|
||||
@@ -31,13 +31,13 @@ pytest-asyncio = "^0.19.0"
|
||||
toml = "^0.10.2"
|
||||
psutil = "^5.9.4"
|
||||
types-psutil = "^5.9.5.4"
|
||||
types-toml = "^0.10.8"
|
||||
|
||||
[tool.poetry.dev-dependencies]
|
||||
flake8 = "^5.0.4"
|
||||
mypy = "==0.971"
|
||||
black = "^22.6.0"
|
||||
isort = "^5.10.1"
|
||||
types-toml = "^0.10.8"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core>=1.0.0"]
|
||||
|
||||
19
safekeeper/src/auth.rs
Normal file
@@ -0,0 +1,19 @@
|
||||
use anyhow::{bail, Result};
|
||||
use utils::auth::{Claims, Scope};
|
||||
use utils::id::TenantId;
|
||||
|
||||
pub fn check_permission(claims: &Claims, tenant_id: Option<TenantId>) -> Result<()> {
|
||||
match (&claims.scope, tenant_id) {
|
||||
(Scope::Tenant, None) => {
|
||||
bail!("Attempt to access management api with tenant scope. Permission denied")
|
||||
}
|
||||
(Scope::Tenant, Some(tenant_id)) => {
|
||||
if claims.tenant_id.unwrap() != tenant_id {
|
||||
bail!("Tenant id mismatch. Permission denied")
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
(Scope::PageServerApi, _) => bail!("PageServerApi scope makes no sense for Safekeeper"),
|
||||
(Scope::SafekeeperData, _) => Ok(()),
|
||||
}
|
||||
}
|
||||
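To illustrate how callers are expected to use this new check_permission helper (the HTTP API and the postgres handler later in this diff do exactly this), here is a minimal sketch that is not part of the diff; it assumes only the Claims and TenantId types imported above, with the claims decoded from a JWT elsewhere, and the hypothetical function name authorize_request is illustrative.

use anyhow::Result;
use utils::auth::Claims;
use utils::id::TenantId;

// Sketch: authorize one request, given claims already decoded by JwtAuth.
fn authorize_request(claims: &Claims, request_tenant: Option<TenantId>) -> Result<()> {
    // Management endpoints pass None: tenant-scoped tokens are rejected there,
    // while SafekeeperData-scoped tokens are accepted.
    // Tenant endpoints pass Some(tenant_id) taken from the request path; the
    // call fails unless the token's tenant_id matches.
    crate::auth::check_permission(claims, request_tenant)
}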
@@ -208,11 +208,12 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option<NodeId>, init: bo
|
||||
GlobalTimelines::init(conf.clone(), wal_backup_launcher_tx)?;
|
||||
|
||||
let conf_ = conf.clone();
|
||||
let auth_ = auth.clone();
|
||||
threads.push(
|
||||
thread::Builder::new()
|
||||
.name("http_endpoint_thread".into())
|
||||
.spawn(|| {
|
||||
let router = http::make_router(conf_, auth);
|
||||
let router = http::make_router(conf_, auth_);
|
||||
endpoint::serve_thread_main(
|
||||
router,
|
||||
http_listener,
|
||||
@@ -226,8 +227,7 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option<NodeId>, init: bo
|
||||
let safekeeper_thread = thread::Builder::new()
|
||||
.name("Safekeeper thread".into())
|
||||
.spawn(|| {
|
||||
// TODO: add auth
|
||||
if let Err(e) = wal_service::thread_main(conf_cloned, pg_listener) {
|
||||
if let Err(e) = wal_service::thread_main(conf_cloned, pg_listener, auth) {
|
||||
info!("safekeeper thread terminated: {e}");
|
||||
}
|
||||
})
|
||||
@@ -254,7 +254,6 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option<NodeId>, init: bo
|
||||
thread::Builder::new()
|
||||
.name("WAL removal thread".into())
|
||||
.spawn(|| {
|
||||
// TODO: add auth?
|
||||
remove_wal::thread_main(conf_);
|
||||
})?,
|
||||
);
|
||||
@@ -264,7 +263,6 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option<NodeId>, init: bo
|
||||
thread::Builder::new()
|
||||
.name("wal backup launcher thread".into())
|
||||
.spawn(move || {
|
||||
// TODO: add auth?
|
||||
wal_backup::wal_backup_launcher_thread_main(conf_, wal_backup_launcher_rx);
|
||||
})?,
|
||||
);
|
||||
|
||||
@@ -1,19 +1,23 @@
|
||||
//! Part of Safekeeper pretending to be Postgres, i.e. handling Postgres
|
||||
//! protocol commands.
|
||||
|
||||
use crate::auth::check_permission;
|
||||
use crate::json_ctrl::{handle_json_ctrl, AppendLogicalMessage};
|
||||
use crate::receive_wal::ReceiveWalConn;
|
||||
|
||||
use crate::send_wal::ReplicationConn;
|
||||
|
||||
use crate::{GlobalTimelines, SafeKeeperConf};
|
||||
use anyhow::{bail, Context, Result};
|
||||
use anyhow::{bail, ensure, Context, Result};
|
||||
|
||||
use postgres_ffi::PG_TLI;
|
||||
use regex::Regex;
|
||||
|
||||
use pq_proto::{BeMessage, FeStartupPacket, RowDescriptor, INT4_OID, TEXT_OID};
|
||||
use std::str;
|
||||
use std::sync::Arc;
|
||||
use tracing::info;
|
||||
use utils::auth::{Claims, JwtAuth, Scope};
|
||||
use utils::{
|
||||
id::{TenantId, TenantTimelineId, TimelineId},
|
||||
lsn::Lsn,
|
||||
@@ -28,6 +32,8 @@ pub struct SafekeeperPostgresHandler {
|
||||
pub tenant_id: Option<TenantId>,
|
||||
pub timeline_id: Option<TimelineId>,
|
||||
pub ttid: TenantTimelineId,
|
||||
auth: Option<Arc<JwtAuth>>,
|
||||
claims: Option<Claims>,
|
||||
}
|
||||
|
||||
/// Parsed Postgres command.
|
||||
@@ -93,7 +99,44 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler {
|
||||
}
|
||||
}
|
||||
|
||||
fn check_auth_jwt(
|
||||
&mut self,
|
||||
_pgb: &mut PostgresBackend,
|
||||
jwt_response: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
// This unwrap is never triggered, because check_auth_jwt is only called when
// auth_type is NeonJWT, which requires auth to be present.
|
||||
let data = self
|
||||
.auth
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.decode(str::from_utf8(jwt_response)?)?;
|
||||
|
||||
if matches!(data.claims.scope, Scope::Tenant) {
|
||||
ensure!(
|
||||
data.claims.tenant_id.is_some(),
|
||||
"jwt token scope is Tenant, but tenant id is missing"
|
||||
)
|
||||
}
|
||||
|
||||
info!(
|
||||
"jwt auth succeeded for scope: {:#?} by tenant id: {:?}",
|
||||
data.claims.scope, data.claims.tenant_id,
|
||||
);
|
||||
|
||||
self.claims = Some(data.claims);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn process_query(&mut self, pgb: &mut PostgresBackend, query_string: &str) -> Result<()> {
|
||||
if query_string
|
||||
.to_ascii_lowercase()
|
||||
.starts_with("set datestyle to ")
|
||||
{
|
||||
// important for debug because psycopg2 executes "SET datestyle TO 'ISO'" on connect
|
||||
pgb.write_message(&BeMessage::CommandComplete(b"SELECT 1"))?;
|
||||
return Ok(());
|
||||
}
|
||||
let cmd = parse_cmd(query_string)?;
|
||||
|
||||
info!(
|
||||
@@ -103,6 +146,7 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler {
|
||||
|
||||
let tenant_id = self.tenant_id.context("tenantid is required")?;
|
||||
let timeline_id = self.timeline_id.context("timelineid is required")?;
|
||||
self.check_permission(Some(tenant_id))?;
|
||||
self.ttid = TenantTimelineId::new(tenant_id, timeline_id);
|
||||
|
||||
match cmd {
|
||||
@@ -122,16 +166,35 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler {
|
||||
}
|
||||
|
||||
impl SafekeeperPostgresHandler {
|
||||
pub fn new(conf: SafeKeeperConf) -> Self {
|
||||
pub fn new(conf: SafeKeeperConf, auth: Option<Arc<JwtAuth>>) -> Self {
|
||||
SafekeeperPostgresHandler {
|
||||
conf,
|
||||
appname: None,
|
||||
tenant_id: None,
|
||||
timeline_id: None,
|
||||
ttid: TenantTimelineId::empty(),
|
||||
auth,
|
||||
claims: None,
|
||||
}
|
||||
}
|
||||
|
||||
// When accessing the management API, supply None as the argument;
// when authorizing a tenant, pass the corresponding tenant id.
|
||||
fn check_permission(&self, tenant_id: Option<TenantId>) -> Result<()> {
|
||||
if self.auth.is_none() {
|
||||
// auth is set to Trust, nothing to check so just return ok
|
||||
return Ok(());
|
||||
}
|
||||
// auth is Some, as just checked above; when auth is Some, claims are always
// present because of the checks during connection init, so this expect
// won't trigger.
|
||||
let claims = self
|
||||
.claims
|
||||
.as_ref()
|
||||
.expect("claims presence already checked");
|
||||
check_permission(claims, tenant_id)
|
||||
}
|
||||
|
||||
///
|
||||
/// Handle IDENTIFY_SYSTEM replication command
|
||||
///
|
||||
|
||||
@@ -12,7 +12,6 @@ use tokio::task::JoinError;
|
||||
|
||||
use crate::safekeeper::ServerInfo;
|
||||
use crate::safekeeper::Term;
|
||||
use crate::safekeeper::TermHistory;
|
||||
|
||||
use crate::timelines_global_map::TimelineDeleteForceResult;
|
||||
use crate::GlobalTimelines;
|
||||
@@ -21,7 +20,7 @@ use etcd_broker::subscription_value::SkTimelineInfo;
|
||||
use utils::{
|
||||
auth::JwtAuth,
|
||||
http::{
|
||||
endpoint::{self, auth_middleware, check_permission},
|
||||
endpoint::{self, auth_middleware, check_permission_with},
|
||||
error::ApiError,
|
||||
json::{json_request, json_response},
|
||||
request::{ensure_no_body, parse_request_param},
|
||||
@@ -62,12 +61,21 @@ where
|
||||
s.serialize_str(&format!("{}", z))
|
||||
}
|
||||
|
||||
/// Same as TermSwitchEntry, but serializes LSN using display serializer
|
||||
/// in Postgres format, i.e. 0/FFFFFFFF. Used only for the API response.
|
||||
#[derive(Debug, Serialize)]
|
||||
struct TermSwitchApiEntry {
|
||||
pub term: Term,
|
||||
#[serde(serialize_with = "display_serialize")]
|
||||
pub lsn: Lsn,
|
||||
}
|
||||
|
||||
/// Augment AcceptorState with epoch for convenience
|
||||
#[derive(Debug, Serialize)]
|
||||
struct AcceptorStateStatus {
|
||||
term: Term,
|
||||
epoch: Term,
|
||||
term_history: TermHistory,
|
||||
term_history: Vec<TermSwitchApiEntry>,
|
||||
}
|
||||
|
||||
/// Info about timeline on safekeeper ready for reporting.
|
||||
@@ -95,6 +103,12 @@ struct TimelineStatus {
|
||||
remote_consistent_lsn: Lsn,
|
||||
}
|
||||
|
||||
fn check_permission(request: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
|
||||
check_permission_with(request, |claims| {
|
||||
crate::auth::check_permission(claims, tenant_id)
|
||||
})
|
||||
}
|
||||
|
||||
/// Report info about timeline.
|
||||
async fn timeline_status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
let ttid = TenantTimelineId::new(
|
||||
@@ -112,10 +126,21 @@ async fn timeline_status_handler(request: Request<Body>) -> Result<Response<Body
|
||||
let (inmem, state) = tli.get_state();
|
||||
let flush_lsn = tli.get_flush_lsn();
|
||||
|
||||
let epoch = state.acceptor_state.get_epoch(flush_lsn);
|
||||
let term_history = state
|
||||
.acceptor_state
|
||||
.term_history
|
||||
.0
|
||||
.into_iter()
|
||||
.map(|ts| TermSwitchApiEntry {
|
||||
term: ts.term,
|
||||
lsn: ts.lsn,
|
||||
})
|
||||
.collect();
|
||||
let acc_state = AcceptorStateStatus {
|
||||
term: state.acceptor_state.term,
|
||||
epoch: state.acceptor_state.get_epoch(flush_lsn),
|
||||
term_history: state.acceptor_state.term_history,
|
||||
epoch,
|
||||
term_history,
|
||||
};
|
||||
|
||||
// Note: we report in memory values which can be lost.
|
||||
@@ -277,3 +302,25 @@ pub fn make_router(
|
||||
record_safekeeper_info,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_term_switch_entry_api_serialize() {
|
||||
let state = AcceptorStateStatus {
|
||||
term: 1,
|
||||
epoch: 1,
|
||||
term_history: vec![TermSwitchApiEntry {
|
||||
term: 1,
|
||||
lsn: Lsn(0x16FFDDDD),
|
||||
}],
|
||||
};
|
||||
let json = serde_json::to_string(&state).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
"{\"term\":1,\"epoch\":1,\"term_history\":[{\"term\":1,\"lsn\":\"0/16FFDDDD\"}]}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ use utils::{
|
||||
logging::LogFormat,
|
||||
};
|
||||
|
||||
mod auth;
|
||||
pub mod broker;
|
||||
pub mod control_file;
|
||||
pub mod control_file_upgrade;
|
||||
|
||||
@@ -5,25 +5,32 @@
|
||||
use anyhow::Result;
|
||||
use regex::Regex;
|
||||
use std::net::{TcpListener, TcpStream};
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use tracing::*;
|
||||
use utils::auth::JwtAuth;
|
||||
|
||||
use crate::handler::SafekeeperPostgresHandler;
|
||||
use crate::SafeKeeperConf;
|
||||
use utils::postgres_backend::{AuthType, PostgresBackend};
|
||||
|
||||
/// Accept incoming TCP connections and spawn them into a background thread.
|
||||
pub fn thread_main(conf: SafeKeeperConf, listener: TcpListener) -> Result<()> {
|
||||
pub fn thread_main(
|
||||
conf: SafeKeeperConf,
|
||||
listener: TcpListener,
|
||||
auth: Option<Arc<JwtAuth>>,
|
||||
) -> Result<()> {
|
||||
loop {
|
||||
match listener.accept() {
|
||||
Ok((socket, peer_addr)) => {
|
||||
debug!("accepted connection from {}", peer_addr);
|
||||
let conf = conf.clone();
|
||||
|
||||
let auth = auth.clone();
|
||||
let _ = thread::Builder::new()
|
||||
.name("WAL service thread".into())
|
||||
.spawn(move || {
|
||||
if let Err(err) = handle_socket(socket, conf) {
|
||||
if let Err(err) = handle_socket(socket, conf, auth) {
|
||||
error!("connection handler exited: {}", err);
|
||||
}
|
||||
})
|
||||
@@ -44,13 +51,25 @@ fn get_tid() -> u64 {
|
||||
|
||||
/// This is run by `thread_main` above, inside a background thread.
|
||||
///
|
||||
fn handle_socket(socket: TcpStream, conf: SafeKeeperConf) -> Result<()> {
|
||||
fn handle_socket(
|
||||
socket: TcpStream,
|
||||
conf: SafeKeeperConf,
|
||||
auth: Option<Arc<JwtAuth>>,
|
||||
) -> Result<()> {
|
||||
let _enter = info_span!("", tid = ?get_tid()).entered();
|
||||
|
||||
socket.set_nodelay(true)?;
|
||||
|
||||
let mut conn_handler = SafekeeperPostgresHandler::new(conf);
|
||||
let pgbackend = PostgresBackend::new(socket, AuthType::Trust, None, false)?;
|
||||
let mut conn_handler = SafekeeperPostgresHandler::new(conf, auth.clone());
|
||||
let pgbackend = PostgresBackend::new(
|
||||
socket,
|
||||
match auth {
|
||||
None => AuthType::Trust,
|
||||
Some(_) => AuthType::NeonJWT,
|
||||
},
|
||||
None,
|
||||
false,
|
||||
)?;
|
||||
// libpq replication protocol between safekeeper and replicas/pagers
|
||||
pgbackend.run(&mut conn_handler)?;
|
||||
|
||||
|
||||
37
storage_broker/Cargo.toml
Normal file
@@ -0,0 +1,37 @@
|
||||
[package]
|
||||
name = "storage_broker"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[features]
|
||||
bench = []
|
||||
|
||||
[dependencies]
|
||||
async-stream = "0.3"
|
||||
bytes = "1.0"
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
futures = "0.3"
|
||||
futures-core = "0.3"
|
||||
futures-util = "0.3"
|
||||
git-version = "0.3.5"
|
||||
humantime = "2.1.0"
|
||||
hyper = {version = "0.14.14", features = ["full"]}
|
||||
once_cell = "1.13.0"
|
||||
parking_lot = "0.12"
|
||||
prost = "0.11"
|
||||
tonic = "0.8"
|
||||
tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] }
|
||||
tokio-stream = "0.1"
|
||||
tracing = "0.1.27"
|
||||
|
||||
metrics = { path = "../libs/metrics" }
|
||||
utils = { path = "../libs/utils" }
|
||||
workspace_hack = { version = "0.1", path = "../workspace_hack" }
|
||||
|
||||
[build-dependencies]
|
||||
tonic-build = "0.8"
|
||||
|
||||
[[bench]]
|
||||
name = "rps"
|
||||
harness = false
|
||||
|
||||
174
storage_broker/benches/rps.rs
Normal file
@@ -0,0 +1,174 @@
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use clap::Parser;
|
||||
use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey;
|
||||
use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId;
|
||||
use storage_broker::proto::{SafekeeperTimelineInfo, SubscribeSafekeeperInfoRequest};
|
||||
use storage_broker::BrokerClientChannel;
|
||||
use storage_broker::DEFAULT_LISTEN_ADDR;
|
||||
use tokio::time;
|
||||
|
||||
use tonic::Request;
|
||||
|
||||
const ABOUT: &str = r#"
|
||||
A simple benchmarking tool for storage_broker. Creates the specified number of
per-timeline publishers and subscribers; each publisher continuously sends
messages and the subscribers read them. Each second the tool outputs the number
of messages summed across all subscribers and the minimum number of messages
received by a single subscriber.
|
||||
|
||||
For example,
|
||||
cargo build -r -p storage_broker && target/release/storage_broker
|
||||
cargo bench --bench rps -- -s 1 -p 1
|
||||
"#;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[clap(author, version, about = ABOUT)]
|
||||
struct Args {
|
||||
/// Number of publishers
|
||||
#[clap(short = 'p', long, value_parser, default_value_t = 1)]
|
||||
num_pubs: u64,
|
||||
/// Number of subscribers
|
||||
#[clap(short = 's', long, value_parser, default_value_t = 1)]
|
||||
num_subs: u64,
|
||||
// Dummy flag to accept the extra `--bench` argument that `cargo bench` passes.
|
||||
#[clap(long)]
|
||||
bench: bool,
|
||||
}
|
||||
|
||||
async fn progress_reporter(counters: Vec<Arc<AtomicU64>>) {
|
||||
let mut interval = time::interval(Duration::from_millis(1000));
|
||||
let mut c_old = counters.iter().map(|c| c.load(Ordering::Relaxed)).sum();
|
||||
let mut c_min_old = counters
|
||||
.iter()
|
||||
.map(|c| c.load(Ordering::Relaxed))
|
||||
.min()
|
||||
.unwrap_or(0);
|
||||
let mut started_at = None;
|
||||
let mut skipped: u64 = 0;
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let c_new = counters.iter().map(|c| c.load(Ordering::Relaxed)).sum();
|
||||
let c_min_new = counters
|
||||
.iter()
|
||||
.map(|c| c.load(Ordering::Relaxed))
|
||||
.min()
|
||||
.unwrap_or(0);
|
||||
if c_new > 0 && started_at.is_none() {
|
||||
started_at = Some(Instant::now());
|
||||
skipped = c_new;
|
||||
}
|
||||
let avg_rps = started_at.map(|s| {
|
||||
let dur = s.elapsed();
|
||||
let dur_secs = dur.as_secs() as f64 + (dur.subsec_millis() as f64) / 1000.0;
|
||||
let avg_rps = (c_new - skipped) as f64 / dur_secs;
|
||||
(dur, avg_rps)
|
||||
});
|
||||
println!(
|
||||
"sum rps {}, min rps {} total {}, total min {}, duration, avg sum rps {:?}",
|
||||
c_new - c_old,
|
||||
c_min_new - c_min_old,
|
||||
c_new,
|
||||
c_min_new,
|
||||
avg_rps
|
||||
);
|
||||
c_old = c_new;
|
||||
c_min_old = c_min_new;
|
||||
}
|
||||
}
|
||||
|
||||
fn tli_from_u64(i: u64) -> Vec<u8> {
|
||||
let mut timeline_id = vec![0xFF; 8];
|
||||
timeline_id.extend_from_slice(&i.to_be_bytes());
|
||||
timeline_id
|
||||
}
|
||||
|
||||
async fn subscribe(client: Option<BrokerClientChannel>, counter: Arc<AtomicU64>, i: u64) {
|
||||
let mut client = match client {
|
||||
Some(c) => c,
|
||||
None => BrokerClientChannel::connect_lazy(format!("http://{}", DEFAULT_LISTEN_ADDR))
|
||||
.await
|
||||
.unwrap(),
|
||||
};
|
||||
|
||||
let key = SubscriptionKey::TenantTimelineId(ProtoTenantTimelineId {
|
||||
tenant_id: vec![0xFF; 16],
|
||||
timeline_id: tli_from_u64(i),
|
||||
});
|
||||
let request = SubscribeSafekeeperInfoRequest {
|
||||
subscription_key: Some(key),
|
||||
};
|
||||
let mut stream = client
|
||||
.subscribe_safekeeper_info(request)
|
||||
.await
|
||||
.unwrap()
|
||||
.into_inner();
|
||||
|
||||
while let Some(_feature) = stream.message().await.unwrap() {
|
||||
counter.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
async fn publish(client: Option<BrokerClientChannel>, n_keys: u64) {
|
||||
let mut client = match client {
|
||||
Some(c) => c,
|
||||
None => BrokerClientChannel::connect_lazy(format!("http://{}", DEFAULT_LISTEN_ADDR))
|
||||
.await
|
||||
.unwrap(),
|
||||
};
|
||||
let mut counter: u64 = 0;
|
||||
|
||||
// create stream producing new values
|
||||
let outbound = async_stream::stream! {
|
||||
loop {
|
||||
let info = SafekeeperTimelineInfo {
|
||||
safekeeper_id: 1,
|
||||
tenant_timeline_id: Some(ProtoTenantTimelineId {
|
||||
tenant_id: vec![0xFF; 16],
|
||||
timeline_id: tli_from_u64(counter % n_keys),
|
||||
}),
|
||||
last_log_term: 0,
|
||||
flush_lsn: counter,
|
||||
commit_lsn: 2,
|
||||
backup_lsn: 3,
|
||||
remote_consistent_lsn: 4,
|
||||
peer_horizon_lsn: 5,
|
||||
safekeeper_connstr: "zenith-1-sk-1.local:7676".to_owned(),
|
||||
local_start_lsn: 0,
|
||||
};
|
||||
counter += 1;
|
||||
yield info;
|
||||
}
|
||||
};
|
||||
let response = client.publish_safekeeper_info(Request::new(outbound)).await;
|
||||
println!("pub response is {:?}", response);
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let args = Args::parse();
|
||||
|
||||
let mut counters = Vec::with_capacity(args.num_subs as usize);
|
||||
for _ in 0..args.num_subs {
|
||||
counters.push(Arc::new(AtomicU64::new(0)));
|
||||
}
|
||||
let h = tokio::spawn(progress_reporter(counters.clone()));
|
||||
|
||||
let c = BrokerClientChannel::connect_lazy(format!("http://{}", DEFAULT_LISTEN_ADDR))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
for i in 0..args.num_subs {
|
||||
let c = Some(c.clone());
|
||||
tokio::spawn(subscribe(c, counters[i as usize].clone(), i));
|
||||
}
|
||||
for _i in 0..args.num_pubs {
|
||||
let c = None;
|
||||
tokio::spawn(publish(c, args.num_subs as u64));
|
||||
}
|
||||
|
||||
h.await?;
|
||||
Ok(())
|
||||
}
|
||||
11
storage_broker/build.rs
Normal file
@@ -0,0 +1,11 @@
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Generate rust code from .proto protobuf.
|
||||
//
|
||||
// Note: we previously tried to use a deterministic location under proto/ for
// easy discovery, but interference with cachepot apparently made the build
// fail sometimes. In any case, per the cargo docs a build script should not
// write anywhere but $OUT_DIR.
|
||||
tonic_build::compile_protos("proto/broker.proto")
|
||||
.unwrap_or_else(|e| panic!("failed to compile protos {:?}", e));
|
||||
Ok(())
|
||||
}
|
||||
2
storage_broker/proto/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
# protobuf generated code
|
||||
storage_broker.rs
|
||||
44
storage_broker/proto/broker.proto
Normal file
@@ -0,0 +1,44 @@
|
||||
syntax = "proto3";
|
||||
|
||||
import "google/protobuf/empty.proto";
|
||||
|
||||
package storage_broker;
|
||||
|
||||
service BrokerService {
|
||||
// Subscribe to safekeeper updates.
|
||||
rpc SubscribeSafekeeperInfo(SubscribeSafekeeperInfoRequest) returns (stream SafekeeperTimelineInfo) {};
|
||||
|
||||
// Publish safekeeper updates.
|
||||
rpc PublishSafekeeperInfo(stream SafekeeperTimelineInfo) returns (google.protobuf.Empty) {};
|
||||
}
|
||||
|
||||
message SubscribeSafekeeperInfoRequest {
|
||||
oneof subscription_key {
|
||||
google.protobuf.Empty all = 1; // subscribe to everything
|
||||
TenantTimelineId tenant_timeline_id = 2; // subscribe to specific timeline
|
||||
}
|
||||
}
|
||||
|
||||
message SafekeeperTimelineInfo {
|
||||
uint64 safekeeper_id = 1;
|
||||
TenantTimelineId tenant_timeline_id = 2;
|
||||
// Term of the last entry.
|
||||
uint64 last_log_term = 3;
|
||||
// LSN of the last record.
|
||||
uint64 flush_lsn = 4;
|
||||
// Up to which LSN safekeeper regards its WAL as committed.
|
||||
uint64 commit_lsn = 5;
|
||||
// LSN up to which safekeeper has backed WAL.
|
||||
uint64 backup_lsn = 6;
|
||||
// LSN of last checkpoint uploaded by pageserver.
|
||||
uint64 remote_consistent_lsn = 7;
|
||||
uint64 peer_horizon_lsn = 8;
|
||||
uint64 local_start_lsn = 9;
|
||||
// A connection string to use for WAL receiving.
|
||||
string safekeeper_connstr = 10;
|
||||
}
|
||||
|
||||
message TenantTimelineId {
|
||||
bytes tenant_id = 1;
|
||||
bytes timeline_id = 2;
|
||||
}
|
||||
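For orientation on this protocol (not part of the diff), here is a minimal, hypothetical Rust client that subscribes to updates for all timelines through the generated service. It assumes the storage_broker re-exports introduced later in this diff (BrokerClientChannel, proto, DEFAULT_LISTEN_ADDR) and a broker already listening on the default address; with prost's default mapping of google.protobuf.Empty to (), the `all` arm of the oneof key is written as All(()).

use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey;
use storage_broker::proto::SubscribeSafekeeperInfoRequest;
use storage_broker::{BrokerClientChannel, DEFAULT_LISTEN_ADDR};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect lazily; the actual connection is established on the first request.
    let mut client =
        BrokerClientChannel::connect_lazy(format!("http://{}", DEFAULT_LISTEN_ADDR)).await?;

    // Subscribe to everything (the `all` arm of the oneof subscription key).
    let request = SubscribeSafekeeperInfoRequest {
        subscription_key: Some(SubscriptionKey::All(())),
    };
    let mut stream = client.subscribe_safekeeper_info(request).await?.into_inner();

    // Each message is one SafekeeperTimelineInfo update.
    while let Some(info) = stream.message().await? {
        println!("safekeeper {}: commit_lsn={}", info.safekeeper_id, info.commit_lsn);
    }
    Ok(())
}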
557
storage_broker/src/bin/storage_broker.rs
Normal file
@@ -0,0 +1,557 @@
|
||||
//! Simple pub-sub based on grpc (tonic) and a Tokio broadcast channel, used for
//! messaging between storage nodes.
//!
//! Subscriptions to 1) a single timeline and 2) all timelines are possible. We
//! could add subscription to a set of timelines to save grpc streams, but
//! testing shows that many individual streams are also fine.
//!
//! A message is dropped if a subscriber can't consume it, without affecting the
//! other subscribers.
//!
//! Only the safekeeper message is supported, but it is not hard to add something
//! else with generics.
|
||||
use clap::{command, Parser};
|
||||
use futures_core::Stream;
|
||||
use futures_util::StreamExt;
|
||||
use hyper::header::CONTENT_TYPE;
|
||||
use hyper::server::conn::AddrStream;
|
||||
use hyper::service::{make_service_fn, service_fn};
|
||||
use hyper::{Body, Method, StatusCode};
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::Infallible;
|
||||
use std::net::SocketAddr;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::task::Poll;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::broadcast::error::RecvError;
|
||||
use tokio::time;
|
||||
use tonic::codegen::Service;
|
||||
use tonic::transport::server::Connected;
|
||||
use tonic::Code;
|
||||
use tonic::{Request, Response, Status};
|
||||
use tracing::*;
|
||||
|
||||
use metrics::{Encoder, TextEncoder};
|
||||
use storage_broker::metrics::{NUM_PUBS, NUM_SUBS_ALL, NUM_SUBS_TIMELINE};
|
||||
use storage_broker::proto::broker_service_server::{BrokerService, BrokerServiceServer};
|
||||
use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey as ProtoSubscriptionKey;
|
||||
use storage_broker::proto::{SafekeeperTimelineInfo, SubscribeSafekeeperInfoRequest};
|
||||
use storage_broker::{parse_proto_ttid, EitherBody, DEFAULT_LISTEN_ADDR};
|
||||
use utils::id::TenantTimelineId;
|
||||
use utils::logging::{self, LogFormat};
|
||||
use utils::project_git_version;
|
||||
|
||||
project_git_version!(GIT_VERSION);
|
||||
|
||||
const DEFAULT_CHAN_SIZE: usize = 128;
|
||||
const DEFAULT_HTTP2_KEEPALIVE_INTERVAL: &str = "5000ms";
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(version = GIT_VERSION, about = "Broker for neon storage nodes communication", long_about = None)]
|
||||
struct Args {
|
||||
/// Endpoint to listen on.
|
||||
#[arg(short, long, default_value = DEFAULT_LISTEN_ADDR)]
|
||||
listen_addr: SocketAddr,
|
||||
/// Size of the queue to the subscriber.
|
||||
#[arg(long, default_value_t = DEFAULT_CHAN_SIZE)]
|
||||
chan_size: usize,
|
||||
/// HTTP/2 keepalive interval.
|
||||
#[arg(long, value_parser= humantime::parse_duration, default_value = DEFAULT_HTTP2_KEEPALIVE_INTERVAL)]
|
||||
http2_keepalive_interval: Duration,
|
||||
/// Format for logging, either 'plain' or 'json'.
|
||||
#[arg(long, default_value = "plain")]
|
||||
log_format: String,
|
||||
}
|
||||
|
||||
type PubId = u64; // id of publisher for registering in maps
|
||||
type SubId = u64; // id of subscriber for registering in maps
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
enum SubscriptionKey {
|
||||
All,
|
||||
Timeline(TenantTimelineId),
|
||||
}
|
||||
|
||||
impl SubscriptionKey {
|
||||
// Parse protobuf subkey (protobuf doesn't have fixed size bytes, we get vectors).
|
||||
pub fn from_proto_subscription_key(key: ProtoSubscriptionKey) -> Result<Self, Status> {
|
||||
match key {
|
||||
ProtoSubscriptionKey::All(_) => Ok(SubscriptionKey::All),
|
||||
ProtoSubscriptionKey::TenantTimelineId(proto_ttid) => {
|
||||
Ok(SubscriptionKey::Timeline(parse_proto_ttid(&proto_ttid)?))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Channel to timeline subscribers.
|
||||
struct ChanToTimelineSub {
|
||||
chan: broadcast::Sender<SafekeeperTimelineInfo>,
|
||||
// Tracked separately so we know when to delete the shared-state entry.
// receiver_count() is awkward for that, as unregistering and dropping the
// receiver side happen at different moments.
|
||||
num_subscribers: u64,
|
||||
}
|
||||
|
||||
struct SharedState {
|
||||
next_pub_id: PubId,
|
||||
num_pubs: i64,
|
||||
next_sub_id: SubId,
|
||||
num_subs_to_timelines: i64,
|
||||
chans_to_timeline_subs: HashMap<TenantTimelineId, ChanToTimelineSub>,
|
||||
num_subs_to_all: i64,
|
||||
chan_to_all_subs: broadcast::Sender<SafekeeperTimelineInfo>,
|
||||
}
|
||||
|
||||
impl SharedState {
|
||||
pub fn new(chan_size: usize) -> Self {
|
||||
SharedState {
|
||||
next_pub_id: 0,
|
||||
num_pubs: 0,
|
||||
next_sub_id: 0,
|
||||
num_subs_to_timelines: 0,
|
||||
chans_to_timeline_subs: HashMap::new(),
|
||||
num_subs_to_all: 0,
|
||||
chan_to_all_subs: broadcast::channel(chan_size).0,
|
||||
}
|
||||
}
|
||||
|
||||
// Register new publisher.
|
||||
pub fn register_publisher(&mut self) -> PubId {
|
||||
let pub_id = self.next_pub_id;
|
||||
self.next_pub_id += 1;
|
||||
self.num_pubs += 1;
|
||||
NUM_PUBS.set(self.num_pubs);
|
||||
pub_id
|
||||
}
|
||||
|
||||
// Unregister publisher.
|
||||
pub fn unregister_publisher(&mut self) {
|
||||
self.num_pubs -= 1;
|
||||
NUM_PUBS.set(self.num_pubs);
|
||||
}
|
||||
|
||||
// Register new subscriber.
|
||||
pub fn register_subscriber(
|
||||
&mut self,
|
||||
sub_key: SubscriptionKey,
|
||||
chan_size: usize,
|
||||
) -> (SubId, broadcast::Receiver<SafekeeperTimelineInfo>) {
|
||||
let sub_id = self.next_sub_id;
|
||||
self.next_sub_id += 1;
|
||||
let sub_rx = match sub_key {
|
||||
SubscriptionKey::All => {
|
||||
self.num_subs_to_all += 1;
|
||||
NUM_SUBS_ALL.set(self.num_subs_to_all);
|
||||
self.chan_to_all_subs.subscribe()
|
||||
}
|
||||
SubscriptionKey::Timeline(ttid) => {
|
||||
self.num_subs_to_timelines += 1;
|
||||
NUM_SUBS_TIMELINE.set(self.num_subs_to_timelines);
|
||||
// Create a new broadcast channel for this key, or subscribe to
// the existing one.
|
||||
let chan_to_timeline_sub =
|
||||
self.chans_to_timeline_subs
|
||||
.entry(ttid)
|
||||
.or_insert(ChanToTimelineSub {
|
||||
chan: broadcast::channel(chan_size).0,
|
||||
num_subscribers: 0,
|
||||
});
|
||||
chan_to_timeline_sub.num_subscribers += 1;
|
||||
chan_to_timeline_sub.chan.subscribe()
|
||||
}
|
||||
};
|
||||
(sub_id, sub_rx)
|
||||
}
|
||||
|
||||
// Unregister the subscriber.
|
||||
pub fn unregister_subscriber(&mut self, sub_key: SubscriptionKey) {
|
||||
match sub_key {
|
||||
SubscriptionKey::All => {
|
||||
self.num_subs_to_all -= 1;
|
||||
NUM_SUBS_ALL.set(self.num_subs_to_all);
|
||||
}
|
||||
SubscriptionKey::Timeline(ttid) => {
|
||||
self.num_subs_to_timelines -= 1;
|
||||
NUM_SUBS_TIMELINE.set(self.num_subs_to_timelines);
|
||||
|
||||
// Remove from the map, destroying the channel, if we are the
|
||||
// last subscriber to this timeline.
|
||||
|
||||
// Missing entry is a bug; we must have registered.
|
||||
let chan_to_timeline_sub = self
|
||||
.chans_to_timeline_subs
|
||||
.get_mut(&ttid)
|
||||
.expect("failed to find sub entry in shmem during unregister");
|
||||
chan_to_timeline_sub.num_subscribers -= 1;
|
||||
if chan_to_timeline_sub.num_subscribers == 0 {
|
||||
self.chans_to_timeline_subs.remove(&ttid);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SharedState wrapper.
|
||||
#[derive(Clone)]
|
||||
struct Registry {
|
||||
shared_state: Arc<RwLock<SharedState>>,
|
||||
chan_size: usize,
|
||||
}
|
||||
|
||||
impl Registry {
|
||||
// Register new publisher in shared state.
|
||||
pub fn register_publisher(&self, remote_addr: SocketAddr) -> Publisher {
|
||||
let pub_id = self.shared_state.write().register_publisher();
|
||||
info!("publication started id={} addr={:?}", pub_id, remote_addr);
|
||||
Publisher {
|
||||
id: pub_id,
|
||||
registry: self.clone(),
|
||||
remote_addr,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unregister_publisher(&self, publisher: &Publisher) {
|
||||
self.shared_state.write().unregister_publisher();
|
||||
info!(
|
||||
"publication ended id={} addr={:?}",
|
||||
publisher.id, publisher.remote_addr
|
||||
);
|
||||
}
|
||||
|
||||
// Register new subscriber in shared state.
|
||||
pub fn register_subscriber(
|
||||
&self,
|
||||
sub_key: SubscriptionKey,
|
||||
remote_addr: SocketAddr,
|
||||
) -> Subscriber {
|
||||
let (sub_id, sub_rx) = self
|
||||
.shared_state
|
||||
.write()
|
||||
.register_subscriber(sub_key, self.chan_size);
|
||||
info!(
|
||||
"subscription started id={}, key={:?}, addr={:?}",
|
||||
sub_id, sub_key, remote_addr
|
||||
);
|
||||
Subscriber {
|
||||
id: sub_id,
|
||||
key: sub_key,
|
||||
sub_rx,
|
||||
registry: self.clone(),
|
||||
remote_addr,
|
||||
}
|
||||
}
|
||||
|
||||
// Unregister the subscriber
|
||||
pub fn unregister_subscriber(&self, subscriber: &Subscriber) {
|
||||
self.shared_state
|
||||
.write()
|
||||
.unregister_subscriber(subscriber.key);
|
||||
info!(
|
||||
"subscription ended id={}, key={:?}, addr={:?}",
|
||||
subscriber.id, subscriber.key, subscriber.remote_addr
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Private subscriber state.
|
||||
struct Subscriber {
|
||||
id: SubId,
|
||||
key: SubscriptionKey,
|
||||
// Subscriber receives messages from publishers here.
|
||||
sub_rx: broadcast::Receiver<SafekeeperTimelineInfo>,
|
||||
// to unregister itself from shared state in Drop
|
||||
registry: Registry,
|
||||
// for logging
|
||||
remote_addr: SocketAddr,
|
||||
}
|
||||
|
||||
impl Drop for Subscriber {
|
||||
fn drop(&mut self) {
|
||||
self.registry.unregister_subscriber(self);
|
||||
}
|
||||
}
|
||||
|
||||
// Private publisher state
|
||||
struct Publisher {
|
||||
id: PubId,
|
||||
registry: Registry,
|
||||
// for logging
|
||||
remote_addr: SocketAddr,
|
||||
}
|
||||
|
||||
impl Publisher {
|
||||
// Send msg to relevant subscribers.
|
||||
pub fn send_msg(&mut self, msg: &SafekeeperTimelineInfo) -> Result<(), Status> {
|
||||
// send message to subscribers for everything
|
||||
let shared_state = self.registry.shared_state.read();
|
||||
// Err means there are no subscribers, which is fine.
|
||||
shared_state.chan_to_all_subs.send(msg.clone()).ok();
|
||||
|
||||
// send message to per timeline subscribers
|
||||
let ttid =
|
||||
parse_proto_ttid(msg.tenant_timeline_id.as_ref().ok_or_else(|| {
|
||||
Status::new(Code::InvalidArgument, "missing tenant_timeline_id")
|
||||
})?)?;
|
||||
if let Some(subs) = shared_state.chans_to_timeline_subs.get(&ttid) {
|
||||
// Err can't happen here: tx is destroyed only when the last subscriber
// is removed from the map along with it.
|
||||
subs.chan
|
||||
.send(msg.clone())
|
||||
.expect("rx is still in the map with zero subscribers");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Publisher {
|
||||
fn drop(&mut self) {
|
||||
self.registry.unregister_publisher(self);
|
||||
}
|
||||
}
|
||||
|
||||
struct Broker {
|
||||
registry: Registry,
|
||||
}
|
||||
|
||||
#[tonic::async_trait]
|
||||
impl BrokerService for Broker {
|
||||
async fn publish_safekeeper_info(
|
||||
&self,
|
||||
request: Request<tonic::Streaming<SafekeeperTimelineInfo>>,
|
||||
) -> Result<Response<()>, Status> {
|
||||
let remote_addr = request
|
||||
.remote_addr()
|
||||
.expect("TCPConnectInfo inserted by handler");
|
||||
let mut publisher = self.registry.register_publisher(remote_addr);
|
||||
|
||||
let mut stream = request.into_inner();
|
||||
|
||||
loop {
|
||||
match stream.next().await {
|
||||
Some(Ok(msg)) => publisher.send_msg(&msg)?,
|
||||
Some(Err(e)) => return Err(e), // grpc error from the stream
|
||||
None => break, // closed stream
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Response::new(()))
|
||||
}
|
||||
|
||||
type SubscribeSafekeeperInfoStream =
|
||||
Pin<Box<dyn Stream<Item = Result<SafekeeperTimelineInfo, Status>> + Send + 'static>>;
|
||||
|
||||
async fn subscribe_safekeeper_info(
|
||||
&self,
|
||||
request: Request<SubscribeSafekeeperInfoRequest>,
|
||||
) -> Result<Response<Self::SubscribeSafekeeperInfoStream>, Status> {
|
||||
let remote_addr = request
|
||||
.remote_addr()
|
||||
.expect("TCPConnectInfo inserted by handler");
|
||||
let proto_key = request
|
||||
.into_inner()
|
||||
.subscription_key
|
||||
.ok_or_else(|| Status::new(Code::InvalidArgument, "missing subscription key"))?;
|
||||
let sub_key = SubscriptionKey::from_proto_subscription_key(proto_key)?;
|
||||
let mut subscriber = self.registry.register_subscriber(sub_key, remote_addr);
|
||||
|
||||
// Transform rx into a stream with Item = Result, as the method's return type demands.
|
||||
let output = async_stream::try_stream! {
|
||||
let mut warn_interval = time::interval(Duration::from_millis(1000));
|
||||
let mut missed_msgs: u64 = 0;
|
||||
loop {
|
||||
match subscriber.sub_rx.recv().await {
|
||||
Ok(info) => yield info,
|
||||
Err(RecvError::Lagged(skipped_msg)) => {
|
||||
missed_msgs += skipped_msg;
|
||||
if let Poll::Ready(_) = futures::poll!(Box::pin(warn_interval.tick())) {
|
||||
warn!("dropped {} messages, channel is full", missed_msgs);
|
||||
missed_msgs = 0;
|
||||
}
|
||||
}
|
||||
Err(RecvError::Closed) => {
|
||||
// can't happen, we never drop the channel while there is a subscriber
|
||||
Err(Status::new(Code::Internal, "channel unexpectedly closed"))?;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Response::new(
|
||||
Box::pin(output) as Self::SubscribeSafekeeperInfoStream
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// We serve only metrics and healthcheck through http1.
|
||||
async fn http1_handler(
|
||||
req: hyper::Request<hyper::body::Body>,
|
||||
) -> Result<hyper::Response<Body>, Infallible> {
|
||||
let resp = match (req.method(), req.uri().path()) {
|
||||
(&Method::GET, "/metrics") => {
|
||||
let mut buffer = vec![];
|
||||
let metrics = metrics::gather();
|
||||
let encoder = TextEncoder::new();
|
||||
encoder.encode(&metrics, &mut buffer).unwrap();
|
||||
|
||||
hyper::Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(CONTENT_TYPE, encoder.format_type())
|
||||
.body(Body::from(buffer))
|
||||
.unwrap()
|
||||
}
|
||||
(&Method::GET, "/status") => hyper::Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(Body::empty())
|
||||
.unwrap(),
|
||||
_ => hyper::Response::builder()
|
||||
.status(StatusCode::NOT_FOUND)
|
||||
.body(Body::empty())
|
||||
.unwrap(),
|
||||
};
|
||||
Ok(resp)
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let args = Args::parse();
|
||||
|
||||
logging::init(LogFormat::from_config(&args.log_format)?)?;
|
||||
info!("version: {GIT_VERSION}");
|
||||
|
||||
let registry = Registry {
|
||||
shared_state: Arc::new(RwLock::new(SharedState::new(args.chan_size))),
|
||||
chan_size: args.chan_size,
|
||||
};
|
||||
let storage_broker_impl = Broker {
|
||||
registry: registry.clone(),
|
||||
};
|
||||
let storage_broker_server = BrokerServiceServer::new(storage_broker_impl);
|
||||
|
||||
info!("listening on {}", &args.listen_addr);
|
||||
|
||||
// grpc is served along with http1 for metrics on a single port, hence we
|
||||
// don't use tonic's Server.
|
||||
hyper::Server::bind(&args.listen_addr)
|
||||
.http2_keep_alive_interval(Some(args.http2_keepalive_interval))
|
||||
.serve(make_service_fn(move |conn: &AddrStream| {
|
||||
let storage_broker_server_cloned = storage_broker_server.clone();
|
||||
let connect_info = conn.connect_info();
|
||||
async move {
|
||||
Ok::<_, Infallible>(service_fn(move |mut req| {
|
||||
// That's what tonic's MakeSvc.call does to pass conninfo to
// the request handler (and where request.remote_addr()
// expects to find it).
|
||||
req.extensions_mut().insert(connect_info.clone());
|
||||
|
||||
// Technically this second clone is not needed, but the move into the
// async block is apparently unavoidable. BTW, the compiler error
// message is enigmatic, see
// https://github.com/rust-lang/rust/issues/68119
//
// We could get away without the async block at all, but then we would
// need to resort to futures::Either to merge the results, which
// doesn't read as nicely.
|
||||
let mut storage_broker_server_svc = storage_broker_server_cloned.clone();
|
||||
async move {
|
||||
if req.headers().get("content-type").map(|x| x.as_bytes())
|
||||
== Some(b"application/grpc")
|
||||
{
|
||||
let res_resp = storage_broker_server_svc.call(req).await;
|
||||
// Grpc and http1 handlers have slightly different
|
||||
// Response types: it is UnsyncBoxBody for the
|
||||
// former one (not sure why) and plain hyper::Body
|
||||
// for the latter. Both implement HttpBody though,
|
||||
// and EitherBody is used to merge them.
|
||||
res_resp.map(|resp| resp.map(EitherBody::Left))
|
||||
} else {
|
||||
let res_resp = http1_handler(req).await;
|
||||
res_resp.map(|resp| resp.map(EitherBody::Right))
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
}))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId;
|
||||
use tokio::sync::broadcast::error::TryRecvError;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
|
||||
fn msg(timeline_id: Vec<u8>) -> SafekeeperTimelineInfo {
|
||||
SafekeeperTimelineInfo {
|
||||
safekeeper_id: 1,
|
||||
tenant_timeline_id: Some(ProtoTenantTimelineId {
|
||||
tenant_id: vec![0x00; 16],
|
||||
timeline_id,
|
||||
}),
|
||||
last_log_term: 0,
|
||||
flush_lsn: 1,
|
||||
commit_lsn: 2,
|
||||
backup_lsn: 3,
|
||||
remote_consistent_lsn: 4,
|
||||
peer_horizon_lsn: 5,
|
||||
safekeeper_connstr: "neon-1-sk-1.local:7676".to_owned(),
|
||||
local_start_lsn: 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn tli_from_u64(i: u64) -> Vec<u8> {
|
||||
let mut timeline_id = vec![0xFF; 8];
|
||||
timeline_id.extend_from_slice(&i.to_be_bytes());
|
||||
timeline_id
|
||||
}
|
||||
|
||||
fn mock_addr() -> SocketAddr {
|
||||
"127.0.0.1:8080".parse().unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_registry() {
|
||||
let registry = Registry {
|
||||
shared_state: Arc::new(RwLock::new(SharedState::new(16))),
|
||||
chan_size: 16,
|
||||
};
|
||||
|
||||
// subscribe to timeline 2
|
||||
let ttid_2 = TenantTimelineId {
|
||||
tenant_id: TenantId::from_slice(&[0x00; 16]).unwrap(),
|
||||
timeline_id: TimelineId::from_slice(&tli_from_u64(2)).unwrap(),
|
||||
};
|
||||
let sub_key_2 = SubscriptionKey::Timeline(ttid_2);
|
||||
let mut subscriber_2 = registry.register_subscriber(sub_key_2, mock_addr());
|
||||
let mut subscriber_all = registry.register_subscriber(SubscriptionKey::All, mock_addr());
|
||||
|
||||
// send two messages with different keys
|
||||
let msg_1 = msg(tli_from_u64(1));
|
||||
let msg_2 = msg(tli_from_u64(2));
|
||||
let mut publisher = registry.register_publisher(mock_addr());
|
||||
publisher.send_msg(&msg_1).expect("failed to send msg");
|
||||
publisher.send_msg(&msg_2).expect("failed to send msg");
|
||||
|
||||
// msg with key 2 should arrive to subscriber_2
|
||||
assert_eq!(subscriber_2.sub_rx.try_recv().unwrap(), msg_2);
|
||||
|
||||
// but nothing more
|
||||
assert_eq!(
|
||||
subscriber_2.sub_rx.try_recv().unwrap_err(),
|
||||
TryRecvError::Empty
|
||||
);
|
||||
|
||||
// subscriber_all should receive both messages
|
||||
assert_eq!(subscriber_all.sub_rx.try_recv().unwrap(), msg_1);
|
||||
assert_eq!(subscriber_all.sub_rx.try_recv().unwrap(), msg_2);
|
||||
assert_eq!(
|
||||
subscriber_all.sub_rx.try_recv().unwrap_err(),
|
||||
TryRecvError::Empty
|
||||
);
|
||||
}
|
||||
}
|
||||
108
storage_broker/src/lib.rs
Normal file
@@ -0,0 +1,108 @@
|
||||
use hyper::body::HttpBody;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use tonic::codegen::StdError;
|
||||
use tonic::{transport::Channel, Code, Status};
|
||||
use utils::id::{TenantId, TenantTimelineId, TimelineId};
|
||||
|
||||
use proto::{
|
||||
broker_service_client::BrokerServiceClient, TenantTimelineId as ProtoTenantTimelineId,
|
||||
};
|
||||
|
||||
// Code generated by protobuf.
|
||||
pub mod proto {
|
||||
tonic::include_proto!("storage_broker");
|
||||
}
|
||||
|
||||
pub mod metrics;
|
||||
|
||||
// Re-exports to avoid direct tonic dependency in user crates.
|
||||
pub use tonic::Request;
|
||||
pub use tonic::Streaming;
|
||||
|
||||
pub const DEFAULT_LISTEN_ADDR: &str = "127.0.0.1:50051";
|
||||
|
||||
// BrokerServiceClient parameterized with tonic's Channel transport; the alias
// helps to avoid depending on tonic directly in user crates.
|
||||
pub type BrokerClientChannel = BrokerServiceClient<Channel>;
|
||||
|
||||
impl BrokerClientChannel {
|
||||
/// Create a new client to the given endpoint, but don't actually connect until the first request.
|
||||
pub async fn connect_lazy<D>(dst: D) -> Result<Self, tonic::transport::Error>
|
||||
where
|
||||
D: std::convert::TryInto<tonic::transport::Endpoint>,
|
||||
D::Error: Into<StdError>,
|
||||
{
|
||||
let conn = tonic::transport::Endpoint::new(dst)?.connect_lazy();
|
||||
Ok(Self::new(conn))
|
||||
}
|
||||
}
|
||||
|
||||
// parse variable length bytes from protobuf
|
||||
pub fn parse_proto_ttid(proto_ttid: &ProtoTenantTimelineId) -> Result<TenantTimelineId, Status> {
|
||||
let tenant_id = TenantId::from_slice(&proto_ttid.tenant_id)
|
||||
.map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {}", e)))?;
|
||||
let timeline_id = TimelineId::from_slice(&proto_ttid.timeline_id).map_err(|e| {
|
||||
Status::new(
|
||||
Code::InvalidArgument,
|
||||
format!("malformed timeline_id: {}", e),
|
||||
)
|
||||
})?;
|
||||
Ok(TenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
})
|
||||
}
|
||||
|
||||
// These few usages don't justify an anyhow dependency, though it would
// work as well.
|
||||
type AnyError = Box<dyn std::error::Error + Send + Sync + 'static>;
|
||||
|
||||
// Provides impl HttpBody for two different types implementing it. Inspired by
|
||||
// https://github.com/hyperium/tonic/blob/master/examples/src/hyper_warp/server.rs
|
||||
pub enum EitherBody<A, B> {
|
||||
Left(A),
|
||||
Right(B),
|
||||
}
|
||||
|
||||
impl<A, B> HttpBody for EitherBody<A, B>
|
||||
where
|
||||
A: HttpBody + Send + Unpin,
|
||||
B: HttpBody<Data = A::Data> + Send + Unpin,
|
||||
A::Error: Into<AnyError>,
|
||||
B::Error: Into<AnyError>,
|
||||
{
|
||||
type Data = A::Data;
|
||||
type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
|
||||
|
||||
fn is_end_stream(&self) -> bool {
|
||||
match self {
|
||||
EitherBody::Left(b) => b.is_end_stream(),
|
||||
EitherBody::Right(b) => b.is_end_stream(),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_data(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Self::Data, Self::Error>>> {
|
||||
match self.get_mut() {
|
||||
EitherBody::Left(b) => Pin::new(b).poll_data(cx).map(map_option_err),
|
||||
EitherBody::Right(b) => Pin::new(b).poll_data(cx).map(map_option_err),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_trailers(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<Option<hyper::HeaderMap>, Self::Error>> {
|
||||
match self.get_mut() {
|
||||
EitherBody::Left(b) => Pin::new(b).poll_trailers(cx).map_err(Into::into),
|
||||
EitherBody::Right(b) => Pin::new(b).poll_trailers(cx).map_err(Into::into),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn map_option_err<T, U: Into<AnyError>>(err: Option<Result<T, U>>) -> Option<Result<T, AnyError>> {
|
||||
err.map(|e| e.map_err(Into::into))
|
||||
}
|
||||
25
storage_broker/src/metrics.rs
Normal file
@@ -0,0 +1,25 @@
|
||||
//! Broker metrics.
|
||||
|
||||
use metrics::{register_int_gauge, IntGauge};
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
pub static NUM_PUBS: Lazy<IntGauge> = Lazy::new(|| {
|
||||
register_int_gauge!("storage_broker_active_publishers", "Number of publications")
|
||||
.expect("Failed to register metric")
|
||||
});
|
||||
|
||||
pub static NUM_SUBS_TIMELINE: Lazy<IntGauge> = Lazy::new(|| {
|
||||
register_int_gauge!(
|
||||
"storage_broker_per_timeline_active_subscribers",
|
||||
"Number of subsciptions to particular tenant timeline id"
|
||||
)
|
||||
.expect("Failed to register metric")
|
||||
});
|
||||
|
||||
pub static NUM_SUBS_ALL: Lazy<IntGauge> = Lazy::new(|| {
|
||||
register_int_gauge!(
|
||||
"storage_broker_all_keys_active_subscribers",
|
||||
"Number of subsciptions to all keys"
|
||||
)
|
||||
.expect("Failed to register metric")
|
||||
});
|
||||
Some files were not shown because too many files have changed in this diff.